diff --git a/.clang-format b/.clang-format index a0a96088c74f49a961a80bc0851a84214b0a9f83..badfc1ba440afc1c87f056ee61c8e38105d60022 100644 --- a/.clang-format +++ b/.clang-format @@ -111,6 +111,7 @@ ForEachMacros: - 'css_for_each_descendant_pre' - 'device_for_each_child_node' - 'dma_fence_chain_for_each' + - 'do_for_each_ftrace_op' - 'drm_atomic_crtc_for_each_plane' - 'drm_atomic_crtc_state_for_each_plane' - 'drm_atomic_crtc_state_for_each_plane_state' @@ -136,6 +137,7 @@ ForEachMacros: - 'for_each_active_dev_scope' - 'for_each_active_drhd_unit' - 'for_each_active_iommu' + - 'for_each_aggr_pgid' - 'for_each_available_child_of_node' - 'for_each_bio' - 'for_each_board_func_rsrc' @@ -234,6 +236,7 @@ ForEachMacros: - 'for_each_node_state' - 'for_each_node_with_cpus' - 'for_each_node_with_property' + - 'for_each_nonreserved_multicast_dest_pgid' - 'for_each_of_allnodes' - 'for_each_of_allnodes_from' - 'for_each_of_cpu_node' @@ -256,6 +259,7 @@ ForEachMacros: - 'for_each_pci_dev' - 'for_each_pci_msi_entry' - 'for_each_pcm_streams' + - 'for_each_physmem_range' - 'for_each_populated_zone' - 'for_each_possible_cpu' - 'for_each_present_cpu' @@ -265,6 +269,8 @@ ForEachMacros: - 'for_each_process_thread' - 'for_each_property_of_node' - 'for_each_registered_fb' + - 'for_each_requested_gpio' + - 'for_each_requested_gpio_in_range' - 'for_each_reserved_mem_region' - 'for_each_rtd_codec_dais' - 'for_each_rtd_codec_dais_rollback' @@ -278,12 +284,17 @@ ForEachMacros: - 'for_each_sg' - 'for_each_sg_dma_page' - 'for_each_sg_page' + - 'for_each_sgtable_dma_page' + - 'for_each_sgtable_dma_sg' + - 'for_each_sgtable_page' + - 'for_each_sgtable_sg' - 'for_each_sibling_event' - 'for_each_subelement' - 'for_each_subelement_extid' - 'for_each_subelement_id' - '__for_each_thread' - 'for_each_thread' + - 'for_each_unicast_dest_pgid' - 'for_each_wakeup_source' - 'for_each_zone' - 'for_each_zone_zonelist' @@ -464,6 +475,7 @@ ForEachMacros: - 'v4l2_m2m_for_each_src_buf' - 'v4l2_m2m_for_each_src_buf_safe' - 'virtio_device_for_each_vq' + - 'while_for_each_ftrace_op' - 'xa_for_each' - 'xa_for_each_marked' - 'xa_for_each_range' diff --git a/.mailmap b/.mailmap index 57fe0085b465924eef5f0dd42bf43e497e4f0bf4..50096b96c85dfc2eedd28a2f46b86e1822654533 100644 --- a/.mailmap +++ b/.mailmap @@ -15,30 +15,31 @@ Aaron Durbin Adam Oldham Adam Radford -Adrian Bunk Adriana Reus +Adrian Bunk Alan Cox Alan Cox -Aleksey Gorelov Aleksandar Markovic -Alex Shi -Alex Shi +Aleksey Gorelov Alexander Lobakin Alexander Lobakin Alexander Lobakin Alexandre Belloni -Alexei Starovoitov Alexei Starovoitov Alexei Starovoitov +Alexei Starovoitov +Alex Shi +Alex Shi Al Viro Al Viro +Andi Kleen Andi Shyti Andreas Herrmann -Andrey Ryabinin Andrew Morton -Andrew Murray Andrew Murray +Andrew Murray Andrew Vasquez +Andrey Ryabinin Andy Adamson Antoine Tenart Antonio Ospite @@ -48,40 +49,42 @@ Arnaud Patard Arnd Bergmann Axel Dyks Axel Lin -Bart Van Assche Bart Van Assche +Bart Van Assche Ben Gardner Ben M Cahill Björn Steinbrink -Boris Brezillon -Boris Brezillon Boris Brezillon Boris Brezillon +Boris Brezillon +Boris Brezillon Brian Avery Brian King +Changbin Du +Changbin Du Chao Yu Chao Yu -Christoph Hellwig Christophe Ricard +Christoph Hellwig Corey Minyard Damian Hobson-Garcia -Daniel Borkmann -Daniel Borkmann +Daniel Borkmann Daniel Borkmann Daniel Borkmann -Daniel Borkmann +Daniel Borkmann +Daniel Borkmann Daniel Borkmann David Brownell David Woodhouse -Dengcheng Zhu -Dengcheng Zhu Dengcheng Zhu Dengcheng Zhu +Dengcheng Zhu +Dengcheng Zhu Dmitry 
Eremin-Solenikov -Dmitry Safonov <0x7f454c46@gmail.com> -Dmitry Safonov <0x7f454c46@gmail.com> Dmitry Safonov <0x7f454c46@gmail.com> +Dmitry Safonov <0x7f454c46@gmail.com> +Dmitry Safonov <0x7f454c46@gmail.com> Domen Puncer Douglas Gilbert Ed L. Cashin @@ -92,20 +95,22 @@ Felix Kuhling Felix Moeller Filipe Lautert Franck Bui-Huu -Frank Rowand Frank Rowand Frank Rowand +Frank Rowand Frank Zago Gao Xiang Gao Xiang -Gerald Schaefer Gerald Schaefer +Gerald Schaefer Gerald Schaefer Greg Kroah-Hartman Greg Kroah-Hartman Greg Kroah-Hartman Greg Kurz Gregory CLEMENT +Gustavo Padovan +Gustavo Padovan Hanjun Guo Heiko Carstens Heiko Carstens @@ -115,32 +120,33 @@ Henrik Rydberg Herbert Xu Jacob Shin Jaegeuk Kim -Jaegeuk Kim Jaegeuk Kim +Jaegeuk Kim Jakub Kicinski James Bottomley James Bottomley James E Wilson -James Hogan James Hogan +James Hogan James Ketrenos Jan Glauber Jan Glauber Jan Glauber Jason Gunthorpe +Jason Gunthorpe Jason Gunthorpe -Javi Merino +Javi Merino Jayachandran C Jayachandran C Jayachandran C Jayachandran C -Jean Tourrilhes +Jean Tourrilhes Jeff Garzik -Jeff Layton Jeff Layton Jeff Layton +Jeff Layton Jens Axboe Jens Osterkamp Jiri Slaby @@ -164,30 +170,31 @@ Julien Thierry Kamil Konieczny Kay Sievers Kenneth W Chen -Konstantin Khlebnikov Konstantin Khlebnikov +Konstantin Khlebnikov Koushik -Krzysztof Kozlowski Krzysztof Kozlowski +Krzysztof Kozlowski Kuninori Morimoto -Leon Romanovsky -Leon Romanovsky Leonardo Bras Leonid I Ananiev +Leon Romanovsky +Leon Romanovsky +Leon Romanovsky Linas Vepstas -Linus Lüssing Linus Lüssing -Li Yang +Linus Lüssing Li Yang +Li Yang Lukasz Luba Maciej W. Rozycki -Marc Zyngier Marcin Nowakowski +Marc Zyngier Mark Brown Mark Yao -Martin Kepplinger Martin Kepplinger Martin Kepplinger +Martin Kepplinger Mathieu Othacehe Matthew Wilcox Matthew Wilcox @@ -197,17 +204,17 @@ Matthew Wilcox Matthew Wilcox Matthew Wilcox Matthieu CASTET -Mauro Carvalho Chehab +Matt Ranostay +Matt Ranostay Matthew Ranostay +Matt Ranostay +Matt Redfearn Mauro Carvalho Chehab +Mauro Carvalho Chehab Mauro Carvalho Chehab +Mauro Carvalho Chehab Mauro Carvalho Chehab Mauro Carvalho Chehab -Mauro Carvalho Chehab Mauro Carvalho Chehab -Matt Ranostay Matthew Ranostay -Matt Ranostay -Matt Ranostay -Matt Redfearn Maxime Ripard Maxime Ripard Mayuresh Janorkar @@ -239,13 +246,13 @@ Paolo 'Blaisorblade' Giarrusso Patrick Mochel Paul Burton Paul Burton +Paul E. McKenney Paul E. McKenney Paul E. McKenney -Paul E. McKenney Paul E. 
McKenney Peter A Jonsson -Peter Oruba Peter Oruba +Peter Oruba Pratyush Anand Praveen BP Punit Agrawal @@ -258,23 +265,23 @@ Ralf Baechle Ralf Wildenhues Randy Dunlap Rémi Denis-Courmont -Ricardo Ribalda Ricardo Ribalda Ricardo Ribalda Ricardo Ribalda Delgado +Ricardo Ribalda Ross Zwisler Rudolf Marek Rui Saraiva Sachin P Sant -Sarangdhar Joshi +Sakari Ailus Sam Ravnborg -Santosh Shilimkar Santosh Shilimkar +Santosh Shilimkar +Sarangdhar Joshi Sascha Hauer S.Çağlar Onur -Sakari Ailus Sean Nyekjaer -Sebastian Reichel Sebastian Reichel +Sebastian Reichel Sedat Dilek Shiraz Hashim Shuah Khan @@ -285,19 +292,23 @@ Simon Arlott Simon Kelley Stéphane Witzmann Stephen Hemminger +Steve Wise +Steve Wise Subash Abhinov Kasiviswanathan Subhash Jadavani Sudeep Holla Sudeep KarkadaNagesha Sumit Semwal +Takashi YOSHII Tejun Heo Thomas Graf Thomas Pedersen Tiezhu Yang Todor Tomov Tony Luck -TripleX Chung TripleX Chung +TripleX Chung Tsuneo Yoshioka +Tycho Andersen Uwe Kleine-König Uwe Kleine-König Uwe Kleine-König @@ -305,22 +316,16 @@ Valdis Kletnieks Vinod Koul Vinod Koul Vinod Koul +Viresh Kumar Viresh Kumar Viresh Kumar -Viresh Kumar Vivien Didelot Vlad Dogaru -Vladimir Davydov Vladimir Davydov -Takashi YOSHII +Vladimir Davydov +WeiXiong Liao Will Deacon -Wolfram Sang Wolfram Sang +Wolfram Sang Yakir Yang Yusuke Goda -Gustavo Padovan -Gustavo Padovan -Changbin Du -Changbin Du -Steve Wise -Steve Wise diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 index f7e32f218f737f753fc6c2d6ea83b064c8f707c2..e82fc37be80225bfe386c75c9e2d1c8bdea4f914 100644 --- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 @@ -43,7 +43,7 @@ Description: read only This sysfs interface exposes the number of cores per chip present in the system. -What: /sys/devices/hv_24x7/interface/cpumask +What: /sys/devices/hv_24x7/cpumask Date: July 2020 Contact: Linux on PowerPC Developer List Description: read only diff --git a/Documentation/RCU/lockdep.rst b/Documentation/RCU/lockdep.rst index f1fc8ae3846a4058796b12769517372350f9f47d..cc860a0c296be1bdb3f57ac79fa10dbe30fd99db 100644 --- a/Documentation/RCU/lockdep.rst +++ b/Documentation/RCU/lockdep.rst @@ -49,7 +49,7 @@ checking of rcu_dereference() primitives: is invoked by both RCU-sched readers and updaters. srcu_dereference_check(p, c): Use explicit check expression "c" along with - srcu_read_lock_held()(). This is useful in code that + srcu_read_lock_held(). This is useful in code that is invoked by both SRCU readers and updaters. rcu_dereference_raw(p): Don't check. (Use sparingly, if at all.) diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt index d336f3f73a4c1b6b308a4ef8c019d91a3b5ecf7d..63fd4e6a014bcce2c161288fed6eb90a64b2b4d6 100644 --- a/Documentation/admin-guide/devices.txt +++ b/Documentation/admin-guide/devices.txt @@ -1662,7 +1662,7 @@ 98 block User-mode virtual block device 0 = /dev/ubda First user-mode block device - 16 = /dev/udbb Second user-mode block device + 16 = /dev/ubdb Second user-mode block device ... 
Partitions are handled in the same way as for IDE diff --git a/Documentation/admin-guide/dynamic-debug-howto.rst b/Documentation/admin-guide/dynamic-debug-howto.rst index e5a8def45f3f8e9f215219b47976338401ed57e4..6c04aea8f4cd83652fc8de70cb104f2348a1d456 100644 --- a/Documentation/admin-guide/dynamic-debug-howto.rst +++ b/Documentation/admin-guide/dynamic-debug-howto.rst @@ -156,7 +156,6 @@ against. Possible keywords are::: ``line-range`` cannot contain space, e.g. "1-30" is valid range but "1 - 30" is not. - ``module=foo`` combined keyword=value form is interchangably accepted The meanings of each keyword are: diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst index a683976fad6d4e1d22ea7c1e4a503b94977c0d8d..d2795ca6821ec33e070a9a724a4f9d13f6df5cbe 100644 --- a/Documentation/admin-guide/ext4.rst +++ b/Documentation/admin-guide/ext4.rst @@ -489,6 +489,9 @@ Files in /sys/fs/ext4/: multiple of this tuning parameter if the stripe size is not set in the ext4 superblock + mb_max_inode_prealloc + The maximum length of per-inode ext4_prealloc_space list. + mb_max_to_scan The maximum number of extents the multiblock allocator will search to find the best extent. @@ -529,21 +532,21 @@ Files in /sys/fs/ext4/: Ioctls ====== -There is some Ext4 specific functionality which can be accessed by applications -through the system call interfaces. The list of all Ext4 specific ioctls are -shown in the table below. +Ext4 implements various ioctls which can be used by applications to access +ext4-specific functionality. An incomplete list of these ioctls is shown in the +table below. This list includes truly ext4-specific ioctls (``EXT4_IOC_*``) as +well as ioctls that may have been ext4-specific originally but are now supported +by some other filesystem(s) too (``FS_IOC_*``). -Table of Ext4 specific ioctls +Table of Ext4 ioctls - EXT4_IOC_GETFLAGS + FS_IOC_GETFLAGS Get additional attributes associated with inode. The ioctl argument is - an integer bitfield, with bit values described in ext4.h. This ioctl is - an alias for FS_IOC_GETFLAGS. + an integer bitfield, with bit values described in ext4.h. - EXT4_IOC_SETFLAGS + FS_IOC_SETFLAGS Set additional attributes associated with inode. The ioctl argument is - an integer bitfield, with bit values described in ext4.h. This ioctl is - an alias for FS_IOC_SETFLAGS. + an integer bitfield, with bit values described in ext4.h. EXT4_IOC_GETVERSION, EXT4_IOC_GETVERSION_OLD Get the inode i_generation number stored for each inode. The diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index bdc1f33fd3d10a409ec0830ee4cb1770b78af566..a1068742a6df11375e2b47135a19c2c6befa59be 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1233,8 +1233,7 @@ efi= [EFI] Format: { "debug", "disable_early_pci_dma", "nochunk", "noruntime", "nosoftreserve", - "novamap", "no_disable_early_pci_dma", - "old_map" } + "novamap", "no_disable_early_pci_dma" } debug: enable misc debug output. disable_early_pci_dma: disable the busmaster bit on all PCI bridges while in the EFI boot stub. @@ -1251,8 +1250,6 @@ novamap: do not call SetVirtualAddressMap(). no_disable_early_pci_dma: Leave the busmaster bit set on all PCI bridges while in the EFI boot stub - old_map [X86-64]: switch to the old ioremap-based EFI - runtime services mapping. 
[Needs CONFIG_X86_UV=y] efi_no_storage_paranoia [EFI; X86] Using this parameter you can use more than 50% of diff --git a/Documentation/admin-guide/laptops/thinkpad-acpi.rst b/Documentation/admin-guide/laptops/thinkpad-acpi.rst index 5e477869df1829c2bd1fc031a9101bb3113a86e2..5fe1ade88c177170fd6af1364d19b5c396eaef77 100644 --- a/Documentation/admin-guide/laptops/thinkpad-acpi.rst +++ b/Documentation/admin-guide/laptops/thinkpad-acpi.rst @@ -1434,7 +1434,7 @@ on the feature, restricting the viewing angles. DYTC Lapmode sensor ------------------- +------------------- sysfs: dytc_lapmode diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst index 7adef969ffeef4b93d7036ffe7a92a83a88400da..5072e7064d13d5ac8500ee94a97454f82346fd9a 100644 --- a/Documentation/admin-guide/pm/intel_pstate.rst +++ b/Documentation/admin-guide/pm/intel_pstate.rst @@ -123,7 +123,9 @@ Energy-Performance Bias (EPB) knob (otherwise), which means that the processor's internal P-state selection logic is expected to focus entirely on performance. This will override the EPP/EPB setting coming from the ``sysfs`` interface -(see `Energy vs Performance Hints`_ below). +(see `Energy vs Performance Hints`_ below). Moreover, any attempts to change +the EPP/EPB to a value different from 0 ("performance") via ``sysfs`` in this +configuration will be rejected. Also, in this configuration the range of P-states available to the processor's internal P-state selection logic is always restricted to the upper boundary @@ -564,8 +566,8 @@ Energy-Performance Preference (EPP) knob (if supported) or its Energy-Performance Bias (EPB) knob. It is also possible to write a positive integer value between 0 to 255, if the EPP feature is present. If the EPP feature is not present, writing integer value to this attribute is not -supported. In this case, user can use - "/sys/devices/system/cpu/cpu*/power/energy_perf_bias" interface. +supported. In this case, user can use the +"/sys/devices/system/cpu/cpu*/power/energy_perf_bias" interface. [Note that tasks may by migrated from one CPU to another by the scheduler's load-balancing algorithm and if different energy vs performance hints are diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst index d46429be334ea2021497b6476c146392c8f1b2ef..7df2465fd108d7554e1ac86b3b941a2b595c4dd5 100644 --- a/Documentation/bpf/index.rst +++ b/Documentation/bpf/index.rst @@ -36,6 +36,12 @@ Two sets of Questions and Answers (Q&A) are maintained. bpf_devel_QA +Helper functions +================ + +* `bpf-helpers(7)`_ maintains a list of helpers available to eBPF programs. + + Program types ============= @@ -79,4 +85,5 @@ Other .. _networking-filter: ../networking/filter.rst .. _man-pages: https://www.kernel.org/doc/man-pages/ .. _bpf(2): https://man7.org/linux/man-pages/man2/bpf.2.html +.. _bpf-helpers(7): https://man7.org/linux/man-pages/man7/bpf-helpers.7.html .. 
_BPF and XDP Reference Guide: https://docs.cilium.io/en/latest/bpf/ diff --git a/Documentation/devicetree/bindings/clock/imx23-clock.yaml b/Documentation/devicetree/bindings/clock/imx23-clock.yaml index 66cb238a1040cd497aebba9490b6d3c5e301ded4..ad21899981af8869f41eb56233295494054215e0 100644 --- a/Documentation/devicetree/bindings/clock/imx23-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx23-clock.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Clock bindings for Freescale i.MX23 maintainers: - - Shawn Guo + - Shawn Guo description: | The clock consumer should specify the desired clock by having the clock diff --git a/Documentation/devicetree/bindings/clock/imx28-clock.yaml b/Documentation/devicetree/bindings/clock/imx28-clock.yaml index 72328d5ca09acd92d97fb700c96e14158253f0e3..f1af1108129eec4c5ed3718a8f44c169298d251f 100644 --- a/Documentation/devicetree/bindings/clock/imx28-clock.yaml +++ b/Documentation/devicetree/bindings/clock/imx28-clock.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Clock bindings for Freescale i.MX28 maintainers: - - Shawn Guo + - Shawn Guo description: | The clock consumer should specify the desired clock by having the clock diff --git a/Documentation/devicetree/bindings/gpio/gpio-mxs.yaml b/Documentation/devicetree/bindings/gpio/gpio-mxs.yaml index ccf5b50e798b936a70161208c589c62bfb866ed4..dfa1133f8c5e4eb223b3c4e68cff066a0d52d822 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-mxs.yaml +++ b/Documentation/devicetree/bindings/gpio/gpio-mxs.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Freescale MXS GPIO controller maintainers: - - Shawn Guo + - Shawn Guo - Anson Huang description: | diff --git a/Documentation/devicetree/bindings/i2c/i2c-mxs.yaml b/Documentation/devicetree/bindings/i2c/i2c-mxs.yaml index d3134ed775fa2dc7c5b0261f101725cd7c472bd3..21ae7bce038ec2b212de6de5c0445aa0edb0120e 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mxs.yaml +++ b/Documentation/devicetree/bindings/i2c/i2c-mxs.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Freescale MXS Inter IC (I2C) Controller maintainers: - - Shawn Guo + - Shawn Guo properties: compatible: diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt deleted file mode 100644 index 7841cb099e139446042d440e995019be75d1a7ef..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt +++ /dev/null @@ -1,66 +0,0 @@ -Texas Instruments K3 Interrupt Aggregator -========================================= - -The Interrupt Aggregator (INTA) provides a centralized machine -which handles the termination of system events to that they can -be coherently processed by the host(s) in the system. A maximum -of 64 events can be mapped to a single interrupt. - - - Interrupt Aggregator - +-----------------------------------------+ - | Intmap VINT | - | +--------------+ +------------+ | - m ------>| | vint | bit | | 0 |.....|63| vint0 | - . | +--------------+ +------------+ | +------+ - . | . . | | HOST | -Globalevents ------>| . . |------>| IRQ | - . | . . | | CTRL | - . | . . 
| +------+ - n ------>| +--------------+ +------------+ | - | | vint | bit | | 0 |.....|63| vintx | - | +--------------+ +------------+ | - | | - +-----------------------------------------+ - -Configuration of these Intmap registers that maps global events to vint is done -by a system controller (like the Device Memory and Security Controller on K3 -AM654 SoC). Driver should request the system controller to get the range -of global events and vints assigned to the requesting host. Management -of these requested resources should be handled by driver and requests -system controller to map specific global event to vint, bit pair. - -Communication between the host processor running an OS and the system -controller happens through a protocol called TI System Control Interface -(TISCI protocol). For more details refer: -Documentation/devicetree/bindings/arm/keystone/ti,sci.txt - -TISCI Interrupt Aggregator Node: -------------------------------- -- compatible: Must be "ti,sci-inta". -- reg: Should contain registers location and length. -- interrupt-controller: Identifies the node as an interrupt controller -- msi-controller: Identifies the node as an MSI controller. -- interrupt-parent: phandle of irq parent. -- ti,sci: Phandle to TI-SCI compatible System controller node. -- ti,sci-dev-id: TISCI device ID of the Interrupt Aggregator. -- ti,sci-rm-range-vint: Array of TISCI subtype ids representing vints(inta - outputs) range within this INTA, assigned to the - requesting host context. -- ti,sci-rm-range-global-event: Array of TISCI subtype ids representing the - global events range reaching this IA and are assigned - to the requesting host context. - -Example: --------- -main_udmass_inta: interrupt-controller@33d00000 { - compatible = "ti,sci-inta"; - reg = <0x0 0x33d00000 0x0 0x100000>; - interrupt-controller; - msi-controller; - interrupt-parent = <&main_navss_intr>; - ti,sci = <&dmsc>; - ti,sci-dev-id = <179>; - ti,sci-rm-range-vint = <0x0>; - ti,sci-rm-range-global-event = <0x1>; -}; diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c7cd05656a3e9cf38ac5b66f833ce42d74561502 --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml @@ -0,0 +1,98 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/interrupt-controller/ti,sci-inta.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Texas Instruments K3 Interrupt Aggregator + +maintainers: + - Lokesh Vutla + +allOf: + - $ref: /schemas/arm/keystone/ti,k3-sci-common.yaml# + +description: | + The Interrupt Aggregator (INTA) provides a centralized machine + which handles the termination of system events to that they can + be coherently processed by the host(s) in the system. A maximum + of 64 events can be mapped to a single interrupt. + + Interrupt Aggregator + +-----------------------------------------+ + | Intmap VINT | + | +--------------+ +------------+ | + m ------>| | vint | bit | | 0 |.....|63| vint0 | + . | +--------------+ +------------+ | +------+ + . | . . | | HOST | + Globalevents ------>| . . |----->| IRQ | + . | . . | | CTRL | + . | . . 
| +------+ + n ------>| +--------------+ +------------+ | + | | vint | bit | | 0 |.....|63| vintx | + | +--------------+ +------------+ | + | | + +-----------------------------------------+ + + Configuration of these Intmap registers that maps global events to vint is + done by a system controller (like the Device Memory and Security Controller + on AM654 SoC). Driver should request the system controller to get the range + of global events and vints assigned to the requesting host. Management + of these requested resources should be handled by driver and requests + system controller to map specific global event to vint, bit pair. + + Communication between the host processor running an OS and the system + controller happens through a protocol called TI System Control Interface + (TISCI protocol). + +properties: + compatible: + const: ti,sci-inta + + reg: + maxItems: 1 + + interrupt-controller: true + + msi-controller: true + + ti,interrupt-ranges: + $ref: /schemas/types.yaml#/definitions/uint32-matrix + description: | + Interrupt ranges that converts the INTA output hw irq numbers + to parents's input interrupt numbers. + items: + items: + - description: | + "output_irq" specifies the base for inta output irq + - description: | + "parent's input irq" specifies the base for parent irq + - description: | + "limit" specifies the limit for translation + +required: + - compatible + - reg + - interrupt-controller + - msi-controller + - ti,sci + - ti,sci-dev-id + - ti,interrupt-ranges + +examples: + - | + bus { + #address-cells = <2>; + #size-cells = <2>; + + main_udmass_inta: msi-controller@33d00000 { + compatible = "ti,sci-inta"; + reg = <0x0 0x33d00000 0x0 0x100000>; + interrupt-controller; + msi-controller; + interrupt-parent = <&main_navss_intr>; + ti,sci = <&dmsc>; + ti,sci-dev-id = <179>; + ti,interrupt-ranges = <0 0 256>; + }; + }; diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt deleted file mode 100644 index 178fca08278feebe56d689542caf4b2a46c72bc1..0000000000000000000000000000000000000000 --- a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt +++ /dev/null @@ -1,82 +0,0 @@ -Texas Instruments K3 Interrupt Router -===================================== - -The Interrupt Router (INTR) module provides a mechanism to mux M -interrupt inputs to N interrupt outputs, where all M inputs are selectable -to be driven per N output. An Interrupt Router can either handle edge triggered -or level triggered interrupts and that is fixed in hardware. - - Interrupt Router - +----------------------+ - | Inputs Outputs | - +-------+ | +------+ +-----+ | - | GPIO |----------->| | irq0 | | 0 | | Host IRQ - +-------+ | +------+ +-----+ | controller - | . . | +-------+ - +-------+ | . . |----->| IRQ | - | INTA |----------->| . . | +-------+ - +-------+ | . +-----+ | - | +------+ | N | | - | | irqM | +-----+ | - | +------+ | - | | - +----------------------+ - -There is one register per output (MUXCNTL_N) that controls the selection. -Configuration of these MUXCNTL_N registers is done by a system controller -(like the Device Memory and Security Controller on K3 AM654 SoC). System -controller will keep track of the used and unused registers within the Router. -Driver should request the system controller to get the range of GIC IRQs -assigned to the requesting hosts. It is the drivers responsibility to keep -track of Host IRQs. 
- -Communication between the host processor running an OS and the system -controller happens through a protocol called TI System Control Interface -(TISCI protocol). For more details refer: -Documentation/devicetree/bindings/arm/keystone/ti,sci.txt - -TISCI Interrupt Router Node: ----------------------------- -Required Properties: -- compatible: Must be "ti,sci-intr". -- ti,intr-trigger-type: Should be one of the following: - 1: If intr supports edge triggered interrupts. - 4: If intr supports level triggered interrupts. -- interrupt-controller: Identifies the node as an interrupt controller -- #interrupt-cells: Specifies the number of cells needed to encode an - interrupt source. The value should be 2. - First cell should contain the TISCI device ID of source - Second cell should contain the interrupt source offset - within the device. -- ti,sci: Phandle to TI-SCI compatible System controller node. -- ti,sci-dst-id: TISCI device ID of the destination IRQ controller. -- ti,sci-rm-range-girq: Array of TISCI subtype ids representing the host irqs - assigned to this interrupt router. Each subtype id - corresponds to a range of host irqs. - -For more details on TISCI IRQ resource management refer: -https://downloads.ti.com/tisci/esd/latest/2_tisci_msgs/rm/rm_irq.html - -Example: --------- -The following example demonstrates both interrupt router node and the consumer -node(main gpio) on the AM654 SoC: - -main_intr: interrupt-controller0 { - compatible = "ti,sci-intr"; - ti,intr-trigger-type = <1>; - interrupt-controller; - interrupt-parent = <&gic500>; - #interrupt-cells = <2>; - ti,sci = <&dmsc>; - ti,sci-dst-id = <56>; - ti,sci-rm-range-girq = <0x1>; -}; - -main_gpio0: gpio@600000 { - ... - interrupt-parent = <&main_intr>; - interrupts = <57 256>, <57 257>, <57 258>, - <57 259>, <57 260>, <57 261>; - ... -}; diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cff6a956afb44e287e9be0d9b15b693e4c6fdaa2 --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml @@ -0,0 +1,102 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/interrupt-controller/ti,sci-intr.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Texas Instruments K3 Interrupt Router + +maintainers: + - Lokesh Vutla + +allOf: + - $ref: /schemas/arm/keystone/ti,k3-sci-common.yaml# + +description: | + The Interrupt Router (INTR) module provides a mechanism to mux M + interrupt inputs to N interrupt outputs, where all M inputs are selectable + to be driven per N output. An Interrupt Router can either handle edge + triggered or level triggered interrupts and that is fixed in hardware. + + Interrupt Router + +----------------------+ + | Inputs Outputs | + +-------+ | +------+ +-----+ | + | GPIO |----------->| | irq0 | | 0 | | Host IRQ + +-------+ | +------+ +-----+ | controller + | . . | +-------+ + +-------+ | . . |----->| IRQ | + | INTA |----------->| . . | +-------+ + +-------+ | . +-----+ | + | +------+ | N | | + | | irqM | +-----+ | + | +------+ | + | | + +----------------------+ + + There is one register per output (MUXCNTL_N) that controls the selection. + Configuration of these MUXCNTL_N registers is done by a system controller + (like the Device Memory and Security Controller on K3 AM654 SoC). 
System + controller will keep track of the used and unused registers within the Router. + Driver should request the system controller to get the range of GIC IRQs + assigned to the requesting hosts. It is the drivers responsibility to keep + track of Host IRQs. + + Communication between the host processor running an OS and the system + controller happens through a protocol called TI System Control Interface + (TISCI protocol). + +properties: + compatible: + const: ti,sci-intr + + ti,intr-trigger-type: + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [1, 4] + description: | + Should be one of the following. + 1 = If intr supports edge triggered interrupts. + 4 = If intr supports level triggered interrupts. + + interrupt-controller: true + + '#interrupt-cells': + const: 1 + description: | + The 1st cell should contain interrupt router input hw number. + + ti,interrupt-ranges: + $ref: /schemas/types.yaml#/definitions/uint32-matrix + description: | + Interrupt ranges that converts the INTR output hw irq numbers + to parents's input interrupt numbers. + items: + items: + - description: | + "output_irq" specifies the base for intr output irq + - description: | + "parent's input irq" specifies the base for parent irq + - description: | + "limit" specifies the limit for translation + +required: + - compatible + - ti,intr-trigger-type + - interrupt-controller + - '#interrupt-cells' + - ti,sci + - ti,sci-dev-id + - ti,interrupt-ranges + +examples: + - | + main_gpio_intr: interrupt-controller0 { + compatible = "ti,sci-intr"; + ti,intr-trigger-type = <1>; + interrupt-controller; + interrupt-parent = <&gic500>; + #interrupt-cells = <1>; + ti,sci = <&dmsc>; + ti,sci-dev-id = <131>; + ti,interrupt-ranges = <0 360 32>; + }; diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml b/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml index 5887c917d480be82b19cc6c588cacd19503226cd..58fe9d02a781a035c1b35a0a234a98f8652293bf 100644 --- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml +++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml @@ -30,9 +30,13 @@ allOf: then: properties: clock-output-names: - items: - - const: clk_out_sd0 - - const: clk_in_sd0 + oneOf: + - items: + - const: clk_out_sd0 + - const: clk_in_sd0 + - items: + - const: clk_out_sd1 + - const: clk_in_sd1 properties: compatible: diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml index 75dc1168d7178ab4dead968c3888681368064d6d..10b45966f1b87652bd4c1ecbdaf0c93011d32403 100644 --- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml +++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Freescale Enhanced Secure Digital Host Controller (eSDHC) for i.MX maintainers: - - Shawn Guo + - Shawn Guo allOf: - $ref: "mmc-controller.yaml" diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt index 0c9cf6a8808c10e516dec5771ae6349754cc03d9..26a8f320a15636a26f0bb583b30fe16898050b44 100644 --- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt +++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt @@ -50,6 +50,8 @@ Optional properties: error caused by stop clock(fifo full) Valid range = [0:0x7]. if not present, default value is 0. applied to compatible "mediatek,mt2701-mmc". +- resets: Phandle and reset specifier pair to softreset line of MSDC IP. +- reset-names: Should be "hrst". 
Examples: mmc0: mmc@11230000 { diff --git a/Documentation/devicetree/bindings/mmc/mxs-mmc.yaml b/Documentation/devicetree/bindings/mmc/mxs-mmc.yaml index 1cccc0478d49b853d391fc7569d6a5a0f3aad457..bec8f8c71ff253f8fa0a95e261f3ddfc993243e6 100644 --- a/Documentation/devicetree/bindings/mmc/mxs-mmc.yaml +++ b/Documentation/devicetree/bindings/mmc/mxs-mmc.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Freescale MXS MMC controller maintainers: - - Shawn Guo + - Shawn Guo description: | The Freescale MXS Synchronous Serial Ports (SSP) can act as a MMC controller diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt index 2cf3affa1be705b37a1bf95879980df3ffc4e157..96c0b1440c9c5fe942e99c8f294cddaf5d84625a 100644 --- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt +++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt @@ -15,8 +15,15 @@ Required properties: - "nvidia,tegra210-sdhci": for Tegra210 - "nvidia,tegra186-sdhci": for Tegra186 - "nvidia,tegra194-sdhci": for Tegra194 -- clocks : Must contain one entry, for the module clock. - See ../clocks/clock-bindings.txt for details. +- clocks: For Tegra210, Tegra186 and Tegra194 must contain two entries. + One for the module clock and one for the timeout clock. + For all other Tegra devices, must contain a single entry for + the module clock. See ../clocks/clock-bindings.txt for details. +- clock-names: For Tegra210, Tegra186 and Tegra194 must contain the + strings 'sdhci' and 'tmclk' to represent the module and + the timeout clocks, respectively. + For all other Tegra devices must contain the string 'sdhci' + to represent the module clock. - resets : Must contain an entry for each entry in reset-names. See ../reset/reset.txt for details. - reset-names : Must include the following entries: @@ -99,7 +106,7 @@ Optional properties for Tegra210, Tegra186 and Tegra194: Example: sdhci@700b0000 { - compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci"; + compatible = "nvidia,tegra124-sdhci"; reg = <0x0 0x700b0000 0x0 0x200>; interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; clocks = <&tegra_car TEGRA210_CLK_SDMMC1>; @@ -115,3 +122,22 @@ sdhci@700b0000 { nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>; status = "disabled"; }; + +sdhci@700b0000 { + compatible = "nvidia,tegra210-sdhci"; + reg = <0x0 0x700b0000 0x0 0x200>; + interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&tegra_car TEGRA210_CLK_SDMMC1>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; + resets = <&tegra_car 14>; + reset-names = "sdhci"; + pinctrl-names = "sdmmc-3v3", "sdmmc-1v8"; + pinctrl-0 = <&sdmmc1_3v3>; + pinctrl-1 = <&sdmmc1_1v8>; + nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>; + nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>; + nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>; + nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>; + status = "disabled"; +}; diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt index bf7328aba3305f122345cfb528b7d35c0b8965ea..dab208b5c7c7e007bb40c01c31b8025d61e72120 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa.txt +++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt @@ -1,4 +1,4 @@ Distributed Switch Architecture Device Tree Bindings ---------------------------------------------------- -See Documentation/devicetree/bindings/net/dsa/dsa.yaml for the documenation. +See Documentation/devicetree/bindings/net/dsa/dsa.yaml for the documentation.
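The optional ``resets``/``reset-names`` pair added to the MediaTek MSDC binding (mtk-sd.txt) above would be consumed in a board DTS roughly as follows. This is a minimal sketch rather than the binding's own example: the ``&infracfg`` reset provider and its index are assumptions for illustration, and other required properties (``interrupts``, ``clocks``) are omitted for brevity:

	mmc0: mmc@11230000 {
		compatible = "mediatek,mt2701-mmc";
		reg = <0x11230000 0x1000>;
		/* softreset line of the MSDC IP; provider and index are assumed */
		resets = <&infracfg 2>;
		reset-names = "hrst";
	};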
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml index 1c4474036d46a9dc406668d852866e3dc2124937..fa2baca8c7262704b4ea461ef5e77f0d37679fd0 100644 --- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml @@ -54,7 +54,8 @@ properties: phy-connection-type: description: - Operation mode of the PHY interface + Specifies interface type between the Ethernet device and a physical + layer (PHY) device. enum: # There is not a standard bus between the MAC and the PHY, # something proprietary is being used to embed the PHY in the diff --git a/Documentation/devicetree/bindings/net/renesas,ether.yaml b/Documentation/devicetree/bindings/net/renesas,ether.yaml index 08678af5ed9364cdf2aabb6c0c833009ee0951fc..8ce5ed8a58dd76e6af8c6bdf1ffb2fda6c986be1 100644 --- a/Documentation/devicetree/bindings/net/renesas,ether.yaml +++ b/Documentation/devicetree/bindings/net/renesas,ether.yaml @@ -59,9 +59,15 @@ properties: clocks: maxItems: 1 - pinctrl-0: true + power-domains: + maxItems: 1 + + resets: + maxItems: 1 - pinctrl-names: true + phy-mode: true + + phy-handle: true renesas,no-ether-link: type: boolean @@ -74,6 +80,11 @@ properties: specify when the Ether LINK signal is active-low instead of normal active-high +patternProperties: + "^ethernet-phy@[0-9a-f]$": + type: object + $ref: ethernet-phy.yaml# + required: - compatible - reg @@ -83,7 +94,8 @@ required: - '#address-cells' - '#size-cells' - clocks - - pinctrl-0 + +additionalProperties: false examples: # Lager board @@ -99,8 +111,6 @@ examples: clocks = <&mstp8_clks R8A7790_CLK_ETHER>; phy-mode = "rmii"; phy-handle = <&phy1>; - pinctrl-0 = <ðer_pins>; - pinctrl-names = "default"; renesas,ether-link-active-low; #address-cells = <1>; #size-cells = <0>; @@ -109,7 +119,5 @@ examples: reg = <1>; interrupt-parent = <&irqc0>; interrupts = <0 IRQ_TYPE_LEVEL_LOW>; - pinctrl-0 = <&phy1_pins>; - pinctrl-names = "default"; }; }; diff --git a/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml b/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml index 64b2c64ca806575bb0b6e35608dddaaff81648db..a1e2be737eec913f42b5bbcdd5a2c6fee88a8749 100644 --- a/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml @@ -9,6 +9,14 @@ title: PCIe RC controller on Intel Gateway SoCs maintainers: - Dilip Kota +select: + properties: + compatible: + contains: + const: intel,lgm-pcie + required: + - compatible + properties: compatible: items: diff --git a/Documentation/devicetree/bindings/pwm/mxs-pwm.yaml b/Documentation/devicetree/bindings/pwm/mxs-pwm.yaml index da68f4a25dd96b311f8994ab8cba9ebd4ce03c1b..8740e076061e3868c3708215b420cca2b782e8bb 100644 --- a/Documentation/devicetree/bindings/pwm/mxs-pwm.yaml +++ b/Documentation/devicetree/bindings/pwm/mxs-pwm.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Freescale MXS PWM controller maintainers: - - Shawn Guo + - Shawn Guo - Anson Huang properties: diff --git a/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt b/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt index f5e518d099f2c2057afe360b824c1d88bb2203b5..62d4ed2d7fd79e6eb6ab69647a380fd78a1a34eb 100644 --- a/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt +++ b/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt @@ -23,8 +23,8 @@ Required properties: - compatible: Must be 
one of : - "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-qspi" : MSPI+BSPI on BRCMSTB SoCs - "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI + "brcm,spi-brcmstb-qspi", "brcm,spi-bcm-qspi" : MSPI+BSPI on BRCMSTB SoCs + "brcm,spi-brcmstb-mspi", "brcm,spi-bcm-qspi" : Second Instance of MSPI BRCMSTB SoCs "brcm,spi-bcm7425-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI BRCMSTB SoCs @@ -36,8 +36,8 @@ Required properties: BRCMSTB SoCs "brcm,spi-bcm7278-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI BRCMSTB SoCs - "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi" : MSPI+BSPI on Cygnus, NSP - "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi" : NS2 SoCs + "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi" : MSPI+BSPI on Cygnus, NSP + "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi" : NS2 SoCs - reg: Define the bases and ranges of the associated I/O address spaces. @@ -86,7 +86,7 @@ BRCMSTB SoC Example: spi@f03e3400 { #address-cells = <0x1>; #size-cells = <0x0>; - compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-brcmstb-qspi"; + compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-bcm-qspi"; reg = <0xf03e0920 0x4 0xf03e3400 0x188 0xf03e3200 0x50>; reg-names = "cs_reg", "mspi", "bspi"; interrupts = <0x6 0x5 0x4 0x3 0x2 0x1 0x0>; @@ -149,7 +149,7 @@ BRCMSTB SoC Example: #address-cells = <1>; #size-cells = <0>; clocks = <&upg_fixed>; - compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-brcmstb-mspi"; + compatible = "brcm,spi-brcmstb-mspi", "brcm,spi-bcm-qspi"; reg = <0xf0416000 0x180>; reg-names = "mspi"; interrupts = <0x14>; @@ -160,7 +160,7 @@ BRCMSTB SoC Example: iProc SoC Example: qspi: spi@18027200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x18027200 0x184>, <0x18027000 0x124>, <0x1811c408 0x004>, @@ -191,7 +191,7 @@ iProc SoC Example: NS2 SoC Example: qspi: spi@66470200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi"; + compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi"; reg = <0x66470200 0x184>, <0x66470000 0x124>, <0x67017408 0x004>, diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.yaml b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.yaml index 1b50cedbfb3ea66abb319abef400f5953f605785..50df1a40bbe349d3ea4186e54ff6357dc27b8dc1 100644 --- a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.yaml +++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Freescale (Enhanced) Configurable Serial Peripheral Interface (CSPI/eCSPI) for i.MX maintainers: - - Shawn Guo + - Shawn Guo allOf: - $ref: "/schemas/spi/spi-controller.yaml#" diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml index 22882e769e260f7649b4b354973a38f6f044ac31..312d8fee9dbb81999a6f5e66135138ff3c774f8e 100644 --- a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml +++ b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml @@ -39,6 +39,7 @@ properties: spi common code does not support use of CS signals discontinuously. i.MX8DXL-EVK board only uses CS1 without using CS0. Therefore, add this property to re-config the chipselect value in the LPSPI driver. 
+ type: boolean required: - compatible diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.yaml b/Documentation/devicetree/bindings/thermal/imx-thermal.yaml index aedac1669998216879c2179f02e737b0e09b1e4b..16b57f57d10329ce2fdb5a183233a3e44791a359 100644 --- a/Documentation/devicetree/bindings/thermal/imx-thermal.yaml +++ b/Documentation/devicetree/bindings/thermal/imx-thermal.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: NXP i.MX Thermal Binding maintainers: - - Shawn Guo + - Shawn Guo - Anson Huang properties: diff --git a/Documentation/devicetree/bindings/timer/sifive,clint.yaml b/Documentation/devicetree/bindings/timer/sifive,clint.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2a0e9cd9fbcf0ffded1fb1d2809c9a6094c7dfaa --- /dev/null +++ b/Documentation/devicetree/bindings/timer/sifive,clint.yaml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/timer/sifive,clint.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: SiFive Core Local Interruptor + +maintainers: + - Palmer Dabbelt + - Anup Patel + +description: + SiFive (and other RISC-V) SOCs include an implementation of the SiFive + Core Local Interruptor (CLINT) for M-mode timer and M-mode inter-processor + interrupts. It directly connects to the timer and inter-processor interrupt + lines of various HARTs (or CPUs) so RISC-V per-HART (or per-CPU) local + interrupt controller is the parent interrupt controller for CLINT device. + The clock frequency of CLINT is specified via "timebase-frequency" DT + property of "/cpus" DT node. The "timebase-frequency" DT property is + described in Documentation/devicetree/bindings/riscv/cpus.yaml + +properties: + compatible: + items: + - const: sifive,fu540-c000-clint + - const: sifive,clint0 + + description: + Should be "sifive,-clint" and "sifive,clint". + Supported compatible strings are - + "sifive,fu540-c000-clint" for the SiFive CLINT v0 as integrated + onto the SiFive FU540 chip, and "sifive,clint0" for the SiFive + CLINT v0 IP block with no chip integration tweaks. + Please refer to sifive-blocks-ip-versioning.txt for details + + reg: + maxItems: 1 + + interrupts-extended: + minItems: 1 + +additionalProperties: false + +required: + - compatible + - reg + - interrupts-extended + +examples: + - | + timer@2000000 { + compatible = "sifive,fu540-c000-clint", "sifive,clint0"; + interrupts-extended = <&cpu1intc 3 &cpu1intc 7 + &cpu2intc 3 &cpu2intc 7 + &cpu3intc 3 &cpu3intc 7 + &cpu4intc 3 &cpu4intc 7>; + reg = <0x2000000 0x10000>; + }; +... diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index a1e4356cf522f3075b87901b68c6b228b2d13c7a..8f27994d44976cc7fd39f27cf232345e0c50ae36 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -1001,7 +1001,7 @@ patternProperties: "^sst,.*": description: Silicon Storage Technology, Inc. "^sstar,.*": - description: Xiamen Xingchen(SigmaStar) Technology Co., Ltd. + description: Xiamen Xingchen(SigmaStar) Technology Co., Ltd. (formerly part of MStar Semiconductor, Inc.) 
"^st,.*": description: STMicroelectronics diff --git a/Documentation/devicetree/writing-schema.rst b/Documentation/devicetree/writing-schema.rst index 8c74a99f95e23f4bdd304284b4179100a6686c48..16f21e182ff6d19fe91dd8be5150a658de00b23e 100644 --- a/Documentation/devicetree/writing-schema.rst +++ b/Documentation/devicetree/writing-schema.rst @@ -5,7 +5,7 @@ Writing DeviceTree Bindings in json-schema Devicetree bindings are written using json-schema vocabulary. Schema files are written in a JSON compatible subset of YAML. YAML is used instead of JSON as it -considered more human readable and has some advantages such as allowing +is considered more human readable and has some advantages such as allowing comments (Prefixed with '#'). Schema Contents @@ -19,7 +19,7 @@ $id A json-schema unique identifier string. The string must be a valid URI typically containing the binding's filename and path. For DT schema, it must begin with "http://devicetree.org/schemas/". The URL is used in constructing - references to other files specified in schema "$ref" properties. A $ref values + references to other files specified in schema "$ref" properties. A $ref value with a leading '/' will have the hostname prepended. A $ref value a relative path or filename only will be prepended with the hostname and path components of the current schema file's '$id' value. A URL is used even for local files, diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst index 100bfd22726537e372f362ff10c3ca2db551dd30..13ea0cc0a3fa2eb442f6d2b5ab122b8f47aefc07 100644 --- a/Documentation/driver-api/dma-buf.rst +++ b/Documentation/driver-api/dma-buf.rst @@ -179,7 +179,7 @@ DMA Fence uABI/Sync File :internal: Indefinite DMA Fences -~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~ At various times &dma_fence with an indefinite time until dma_fence_wait() finishes have been proposed. Examples include: diff --git a/Documentation/driver-api/fpga/fpga-bridge.rst b/Documentation/driver-api/fpga/fpga-bridge.rst index 71c5a40da3203d0a2268dd7beed9f0e0d2cf3e20..ccd677ba7d769d5cb178e16ccee4e34324a07fb9 100644 --- a/Documentation/driver-api/fpga/fpga-bridge.rst +++ b/Documentation/driver-api/fpga/fpga-bridge.rst @@ -6,9 +6,9 @@ API to implement a new FPGA bridge * struct :c:type:`fpga_bridge` — The FPGA Bridge structure * struct :c:type:`fpga_bridge_ops` — Low level Bridge driver ops -* :c:func:`devm_fpga_bridge_create()` — Allocate and init a bridge struct -* :c:func:`fpga_bridge_register()` — Register a bridge -* :c:func:`fpga_bridge_unregister()` — Unregister a bridge +* devm_fpga_bridge_create() — Allocate and init a bridge struct +* fpga_bridge_register() — Register a bridge +* fpga_bridge_unregister() — Unregister a bridge .. kernel-doc:: include/linux/fpga/fpga-bridge.h :functions: fpga_bridge diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst index 576f1945eacd9c0a26544ae1ee21defe55d17656..af5382af1379b878ea2b251a3bf7047b2d03bc97 100644 --- a/Documentation/driver-api/fpga/fpga-mgr.rst +++ b/Documentation/driver-api/fpga/fpga-mgr.rst @@ -104,9 +104,9 @@ API for implementing a new FPGA Manager driver * ``fpga_mgr_states`` — Values for :c:member:`fpga_manager->state`. 
* struct :c:type:`fpga_manager` — the FPGA manager struct * struct :c:type:`fpga_manager_ops` — Low level FPGA manager driver ops -* :c:func:`devm_fpga_mgr_create` — Allocate and init a manager struct -* :c:func:`fpga_mgr_register` — Register an FPGA manager -* :c:func:`fpga_mgr_unregister` — Unregister an FPGA manager +* devm_fpga_mgr_create() — Allocate and init a manager struct +* fpga_mgr_register() — Register an FPGA manager +* fpga_mgr_unregister() — Unregister an FPGA manager .. kernel-doc:: include/linux/fpga/fpga-mgr.h :functions: fpga_mgr_states diff --git a/Documentation/driver-api/fpga/fpga-programming.rst b/Documentation/driver-api/fpga/fpga-programming.rst index b5484df6ff0f50d5833418fa1739ec1733ba9632..f487ad64dfb980db388252f4cfdaf8b6d9198dac 100644 --- a/Documentation/driver-api/fpga/fpga-programming.rst +++ b/Documentation/driver-api/fpga/fpga-programming.rst @@ -6,9 +6,9 @@ Overview The in-kernel API for FPGA programming is a combination of APIs from FPGA manager, bridge, and regions. The actual function used to -trigger FPGA programming is :c:func:`fpga_region_program_fpga()`. +trigger FPGA programming is fpga_region_program_fpga(). -:c:func:`fpga_region_program_fpga()` uses functionality supplied by +fpga_region_program_fpga() uses functionality supplied by the FPGA manager and bridges. It will: * lock the region's mutex @@ -20,8 +20,8 @@ the FPGA manager and bridges. It will: * release the locks The struct fpga_image_info specifies what FPGA image to program. It is -allocated/freed by :c:func:`fpga_image_info_alloc()` and freed with -:c:func:`fpga_image_info_free()` +allocated/freed by fpga_image_info_alloc() and freed with +fpga_image_info_free() How to program an FPGA using a region ------------------------------------- @@ -84,10 +84,10 @@ will generate that list. Here's some sample code of what to do next:: API for programming an FPGA --------------------------- -* :c:func:`fpga_region_program_fpga` — Program an FPGA -* :c:type:`fpga_image_info` — Specifies what FPGA image to program -* :c:func:`fpga_image_info_alloc()` — Allocate an FPGA image info struct -* :c:func:`fpga_image_info_free()` — Free an FPGA image info struct +* fpga_region_program_fpga() — Program an FPGA +* fpga_image_info() — Specifies what FPGA image to program +* fpga_image_info_alloc() — Allocate an FPGA image info struct +* fpga_image_info_free() — Free an FPGA image info struct .. kernel-doc:: drivers/fpga/fpga-region.c :functions: fpga_region_program_fpga diff --git a/Documentation/driver-api/fpga/fpga-region.rst b/Documentation/driver-api/fpga/fpga-region.rst index 0529b2d2231a74d07b7a55acc2ce78a28e27bd15..31118a8ba218fa4dd769f047518c3a99b94e1e70 100644 --- a/Documentation/driver-api/fpga/fpga-region.rst +++ b/Documentation/driver-api/fpga/fpga-region.rst @@ -46,18 +46,18 @@ API to add a new FPGA region ---------------------------- * struct :c:type:`fpga_region` — The FPGA region struct -* :c:func:`devm_fpga_region_create` — Allocate and init a region struct -* :c:func:`fpga_region_register` — Register an FPGA region -* :c:func:`fpga_region_unregister` — Unregister an FPGA region +* devm_fpga_region_create() — Allocate and init a region struct +* fpga_region_register() — Register an FPGA region +* fpga_region_unregister() — Unregister an FPGA region The FPGA region's probe function will need to get a reference to the FPGA Manager it will be using to do the programming. This usually would happen during the region's probe function. 
-* :c:func:`fpga_mgr_get` — Get a reference to an FPGA manager, raise ref count -* :c:func:`of_fpga_mgr_get` — Get a reference to an FPGA manager, raise ref count, +* fpga_mgr_get() — Get a reference to an FPGA manager, raise ref count +* of_fpga_mgr_get() — Get a reference to an FPGA manager, raise ref count, given a device node. -* :c:func:`fpga_mgr_put` — Put an FPGA manager +* fpga_mgr_put() — Put an FPGA manager The FPGA region will need to specify which bridges to control while programming the FPGA. The region driver can build a list of bridges during probe time @@ -66,11 +66,11 @@ the list of bridges to program just before programming (:c:member:`fpga_region->get_bridges`). The FPGA bridge framework supplies the following APIs to handle building or tearing down that list. -* :c:func:`fpga_bridge_get_to_list` — Get a ref of an FPGA bridge, add it to a +* fpga_bridge_get_to_list() — Get a ref of an FPGA bridge, add it to a list -* :c:func:`of_fpga_bridge_get_to_list` — Get a ref of an FPGA bridge, add it to a +* of_fpga_bridge_get_to_list() — Get a ref of an FPGA bridge, add it to a list, given a device node -* :c:func:`fpga_bridges_put` — Given a list of bridges, put them +* fpga_bridges_put() — Given a list of bridges, put them .. kernel-doc:: include/linux/fpga/fpga-region.h :functions: fpga_region diff --git a/Documentation/driver-api/iio/core.rst b/Documentation/driver-api/iio/core.rst index b0bc0c028cc500d242632a653705ea7ac221581c..51b21e00239612052476d897f16b5f95b7a58563 100644 --- a/Documentation/driver-api/iio/core.rst +++ b/Documentation/driver-api/iio/core.rst @@ -11,10 +11,10 @@ Industrial I/O Devices ---------------------- * struct :c:type:`iio_dev` - industrial I/O device -* :c:func:`iio_device_alloc()` - allocate an :c:type:`iio_dev` from a driver -* :c:func:`iio_device_free()` - free an :c:type:`iio_dev` from a driver -* :c:func:`iio_device_register()` - register a device with the IIO subsystem -* :c:func:`iio_device_unregister()` - unregister a device from the IIO +* iio_device_alloc() - allocate an :c:type:`iio_dev` from a driver +* iio_device_free() - free an :c:type:`iio_dev` from a driver +* iio_device_register() - register a device with the IIO subsystem +* iio_device_unregister() - unregister a device from the IIO subsystem An IIO device usually corresponds to a single hardware sensor and it @@ -34,17 +34,17 @@ A typical IIO driver will register itself as an :doc:`I2C <../i2c>` or At probe: -1. Call :c:func:`iio_device_alloc()`, which allocates memory for an IIO device. +1. Call iio_device_alloc(), which allocates memory for an IIO device. 2. Initialize IIO device fields with driver specific information (e.g. device name, device channels). -3. Call :c:func:`iio_device_register()`, this registers the device with the +3. Call iio_device_register(), this registers the device with the IIO core. After this call the device is ready to accept requests from user space applications. At remove, we free the resources allocated in probe in reverse order: -1. :c:func:`iio_device_unregister()`, unregister the device from the IIO core. -2. :c:func:`iio_device_free()`, free the memory allocated for the IIO device. +1. iio_device_unregister(), unregister the device from the IIO core. +2. iio_device_free(), free the memory allocated for the IIO device. 
IIO device sysfs interface ========================== diff --git a/Documentation/fault-injection/nvme-fault-injection.rst b/Documentation/fault-injection/nvme-fault-injection.rst index cdb2e829228e3efc7104b411cc7323173ac45c5e..1d4427890d75913a2397fa8c510977cbc232a8d2 100644 --- a/Documentation/fault-injection/nvme-fault-injection.rst +++ b/Documentation/fault-injection/nvme-fault-injection.rst @@ -3,7 +3,7 @@ NVMe Fault Injection Linux's fault injection framework provides a systematic way to support error injection via debugfs in the /sys/kernel/debug directory. When enabled, the default NVME_SC_INVALID_OPCODE with no retry will be -injected into the nvme_end_request. Users can change the default status +injected into the nvme_try_complete_req. Users can change the default status code and no retry flag via the debugfs. The list of Generic Command Status can be found in include/linux/nvme.h diff --git a/Documentation/filesystems/affs.rst b/Documentation/filesystems/affs.rst index 7f1a40dce6d3d641aef598f0bdf481f567ea457d..5776cbd5fa53295d97d637df8a8381df43aaa78c 100644 --- a/Documentation/filesystems/affs.rst +++ b/Documentation/filesystems/affs.rst @@ -110,13 +110,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows: - R maps to r for user, group and others. On directories, R implies x. - - If both W and D are allowed, w will be set. + - W maps to w. - E maps to x. - - H and P are always retained and ignored under Linux. + - D is ignored. - - A is always reset when a file is written to. + - H, S and P are always retained and ignored under Linux. + + - A is cleared when a file is written to. User id and group id will be used unless set[gu]id are given as mount options. Since most of the Amiga file systems are single user systems @@ -128,11 +130,13 @@ Linux -> Amiga: The Linux rwxrwxrwx file mode is handled as follows: - - r permission will set R for user, group and others. + - r permission will allow R for user, group and others. + + - w permission will allow W for user, group and others. - - w permission will set W and D for user, group and others. + - x permission of the user will allow E for plain files. - - x permission of the user will set E for plain files. + - D will be allowed for user, group and others. - All other flags (suid, sgid, ...) are ignored and will not be retained. diff --git a/Documentation/filesystems/ext4/about.rst b/Documentation/filesystems/ext4/about.rst index 0aadba0522644c80549b389600fee4f4f047517e..cc76b577d2f42f8af558bdaa02a97a4c1793bb92 100644 --- a/Documentation/filesystems/ext4/about.rst +++ b/Documentation/filesystems/ext4/about.rst @@ -39,6 +39,6 @@ entry. Other References ---------------- -Also see http://www.nongnu.org/ext2-doc/ for quite a collection of +Also see https://www.nongnu.org/ext2-doc/ for quite a collection of information about ext2/3. Here's another old reference: http://wiki.osdev.org/Ext2 diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst index 17112352f605e087a580043654095d1ffffca16b..29ca5f5feb35d07590c4495d719f1f61f862577b 100644 --- a/Documentation/gpu/amdgpu.rst +++ b/Documentation/gpu/amdgpu.rst @@ -153,7 +153,7 @@ This section covers hwmon and power/thermal controls. HWMON Interfaces ---------------- -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c :doc: hwmon GPU sysfs Power State Interfaces @@ -164,48 +164,54 @@ GPU power controls are exposed via sysfs files. power_dpm_state ~~~~~~~~~~~~~~~ -.. 
kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c :doc: power_dpm_state power_dpm_force_performance_level ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c :doc: power_dpm_force_performance_level pp_table ~~~~~~~~ -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c :doc: pp_table pp_od_clk_voltage ~~~~~~~~~~~~~~~~~ -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c :doc: pp_od_clk_voltage pp_dpm_* ~~~~~~~~ -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie pp_power_profile_mode ~~~~~~~~~~~~~~~~~~~~~ -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c :doc: pp_power_profile_mode *_busy_percent ~~~~~~~~~~~~~~ -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c :doc: gpu_busy_percent -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c :doc: mem_busy_percent +gpu_metrics +~~~~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c + :doc: gpu_metrics + GPU Product Information ======================= @@ -233,7 +239,7 @@ serial_number unique_id --------- -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c :doc: unique_id GPU Memory Usage Information @@ -283,7 +289,7 @@ PCIe Accounting Information pcie_bw ------- -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c :doc: pcie_bw pcie_replay_count diff --git a/Documentation/hwmon/abituguru-datasheet.rst b/Documentation/hwmon/abituguru-datasheet.rst index 6d5253e2223ba66cdb0d538913ff66d6a99aed7e..0cd61471d2a22c473f4a7f0da8c17bad47496b94 100644 --- a/Documentation/hwmon/abituguru-datasheet.rst +++ b/Documentation/hwmon/abituguru-datasheet.rst @@ -68,7 +68,7 @@ See below for all known bank addresses, numbers of sensors in that bank, number of bytes data per sensor and contents/meaning of those bytes. Although both this document and the kernel driver have kept the sensor -terminoligy for the addressing within a bank this is not 100% correct, in +terminology for the addressing within a bank this is not 100% correct, in bank 0x24 for example the addressing within the bank selects a PWM output not a sensor. @@ -155,7 +155,7 @@ After wider testing of the Linux kernel driver some variants of the uGuru have turned up which do not hold 0x08 at DATA within 250 reads after writing the bank address. With these versions this happens quite frequent, using larger timeouts doesn't help, they just go offline for a second or 2, doing some -internal callibration or whatever. Your code should be prepared to handle +internal calibration or whatever. Your code should be prepared to handle this and in case of no response in this specific case just goto sleep for a while and then retry. 
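The retry requirement in the last paragraph can be sketched as follows; the I/O port numbers, the symbolic names and the three-attempt policy are illustrative assumptions, not code taken from the abituguru driver::

    #include <linux/delay.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define UGURU_BANK   0x00e0   /* assumed bank-select port */
    #define UGURU_DATA   0x00e4   /* assumed data port */
    #define UGURU_READY  0x08

    static int uguru_select_bank(u8 bank)
    {
            int attempt, i;

            for (attempt = 0; attempt < 3; attempt++) {
                    outb(bank, UGURU_BANK);

                    /* Well-behaved revisions hold 0x08 at DATA within
                     * 250 reads of writing the bank address. */
                    for (i = 0; i < 250; i++)
                            if (inb(UGURU_DATA) == UGURU_READY)
                                    return 0;

                    /* Some variants instead go offline for a second or
                     * two; larger busy-wait timeouts do not help, so
                     * sleep and then retry. */
                    msleep(2000);
            }
            return -EIO;
    }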
@@ -331,6 +331,6 @@ the voltage / clock programming out, I tried reading and only reading banks 0-0x30 with the reading code used for the sensor banks (0x20-0x28) and this resulted in a _permanent_ reprogramming of the voltages, luckily I had the sensors part configured so that it would shutdown my system on any out of spec -voltages which proprably safed my computer (after a reboot I managed to +voltages which probably saved my computer (after a reboot I managed to immediately enter the bios and reload the defaults). This probably means that the read/write cycle for the non sensor part is different from the sensor part. diff --git a/Documentation/hwmon/abituguru.rst b/Documentation/hwmon/abituguru.rst index d8243c827de995cd7d39cd71604808da934ceca1..cfda60b757ce53662c13564c0347bb2d246d6207 100644 --- a/Documentation/hwmon/abituguru.rst +++ b/Documentation/hwmon/abituguru.rst @@ -17,7 +17,7 @@ Supported chips: Note: The uGuru is a microcontroller with onboard firmware which programs it to behave as a hwmon IC. There are many different revisions of the - firmware and thus effectivly many different revisions of the uGuru. + firmware and thus effectively many different revisions of the uGuru. Below is an incomplete list with which revisions are used for which Motherboards: @@ -33,7 +33,7 @@ Supported chips: sensortype (Volt or Temp) for bank1 sensors, for revision 1 uGuru's this does not always work. For these uGuru's the autodetection can be overridden with the bank1_types module param. For all 3 known - revison 1 motherboards the correct use of this param is: + revision 1 motherboards the correct use of this param is: bank1_types=1,1,0,0,0,0,0,2,0,0,0,0,2,0,0,1 You may also need to specify the fan_sensors option for these boards fan_sensors=5 diff --git a/Documentation/hwmon/abituguru3.rst b/Documentation/hwmon/abituguru3.rst index 514f11f41e8b523025f31482d081c9d09bad503c..88046d8663857978ea530a09725c0bb4996c0701 100644 --- a/Documentation/hwmon/abituguru3.rst +++ b/Documentation/hwmon/abituguru3.rst @@ -13,7 +13,7 @@ Supported chips: Note: The uGuru is a microcontroller with onboard firmware which programs it to behave as a hwmon IC. There are many different revisions of the - firmware and thus effectivly many different revisions of the uGuru. + firmware and thus effectively many different revisions of the uGuru. Below is an incomplete list with which revisions are used for which Motherboards: @@ -24,7 +24,7 @@ Supported chips: - uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X, AW9D-MAX) - The abituguru3 driver is only for revison 3.0.x.x motherboards, + The abituguru3 driver is only for revision 3.0.x.x motherboards, this driver will not work on older motherboards. For older motherboards use the abituguru (without the 3 !) driver. diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst index 2aac50b97921f09e9d777a4c39257f30f163efec..334df758dce360e3b49605c0e7527d19a2baa47c 100644 --- a/Documentation/kbuild/llvm.rst +++ b/Documentation/kbuild/llvm.rst @@ -23,8 +23,8 @@ supports C and the GNU C extensions required by the kernel, and is pronounced Clang ----- -The compiler used can be swapped out via `CC=` command line argument to `make`. -`CC=` should be set when selecting a config and during a build. +The compiler used can be swapped out via ``CC=`` command line argument to ``make``. +``CC=`` should be set when selecting a config and during a build. 
:: make CC=clang defconfig @@ -34,33 +34,33 @@ Cross Compiling --------------- A single Clang compiler binary will typically contain all supported backends, -which can help simplify cross compiling. +which can help simplify cross compiling. :: ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang -`CROSS_COMPILE` is not used to prefix the Clang compiler binary, instead -`CROSS_COMPILE` is used to set a command line flag: `--target <triple>`. For -example: +``CROSS_COMPILE`` is not used to prefix the Clang compiler binary, instead +``CROSS_COMPILE`` is used to set a command line flag: ``--target <triple>``. For +example: :: clang --target aarch64-linux-gnu foo.c LLVM Utilities -------------- -LLVM has substitutes for GNU binutils utilities. Kbuild supports `LLVM=1` -to enable them. +LLVM has substitutes for GNU binutils utilities. Kbuild supports ``LLVM=1`` +to enable them. :: make LLVM=1 -They can be enabled individually. The full list of the parameters: +They can be enabled individually. The full list of the parameters: :: - make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \\ - OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \\ - READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \\ + make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \ + OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \ + READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \ + HOSTLD=ld.lld Currently, the integrated assembler is disabled by default. You can pass -`LLVM_IAS=1` to enable it. +``LLVM_IAS=1`` to enable it. Getting Help ------------ diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst index b81b8913a5a3714c60ef475ea2901bfe9c281808..58d513a0fa95d706add7d02b60c183994d6d4047 100644 --- a/Documentation/kbuild/makefiles.rst +++ b/Documentation/kbuild/makefiles.rst @@ -16,7 +16,7 @@ This document describes the Linux kernel Makefiles. --- 3.5 Library file goals - lib-y --- 3.6 Descending down in directories --- 3.7 Compilation flags - --- 3.8 Command line dependency + --- 3.8 --- 3.9 Dependency tracking --- 3.10 Special Rules --- 3.11 $(CC) support functions @@ -39,8 +39,8 @@ This document describes the Linux kernel Makefiles. === 7 Architecture Makefiles --- 7.1 Set variables to tweak the build to the architecture - --- 7.2 Add prerequisites to archheaders: - --- 7.3 Add prerequisites to archprepare: + --- 7.2 Add prerequisites to archheaders + --- 7.3 Add prerequisites to archprepare --- 7.4 List directories to visit when descending --- 7.5 Architecture-specific boot images --- 7.6 Building non-kbuild targets @@ -129,7 +129,7 @@ The preferred name for the kbuild files are 'Makefile' but 'Kbuild' can be used and if both a 'Makefile' and a 'Kbuild' file exists, then the 'Kbuild' file will be used. -Section 3.1 "Goal definitions" is a quick intro, further chapters provide +Section 3.1 "Goal definitions" is a quick intro; further chapters provide more details, with real examples. 3.1 Goal definitions @@ -965,7 +965,7 @@ When kbuild executes, the following steps are followed (roughly): KBUILD_LDFLAGS := -m elf_s390 Note: ldflags-y can be used to further customise - the flags used. See chapter 3.7. + the flags used. See section 3.7. LDFLAGS_vmlinux Options for $(LD) when linking vmlinux @@ -1121,7 +1121,7 @@ When kbuild executes, the following steps are followed (roughly): In this example, the file target maketools will be processed before descending down in the subdirectories. 
- See also chapter XXX-TODO that describe how kbuild supports + See also chapter XXX-TODO that describes how kbuild supports generating offset header files. @@ -1261,7 +1261,7 @@ When kbuild executes, the following steps are followed (roughly): always be built. Assignments to $(targets) are without $(obj)/ prefix. if_changed may be used in conjunction with custom commands as - defined in 6.8 "Custom kbuild commands". + defined in 7.8 "Custom kbuild commands". Note: It is a typical mistake to forget the FORCE prerequisite. Another common pitfall is that whitespace is sometimes @@ -1411,7 +1411,7 @@ When kbuild executes, the following steps are followed (roughly): that may be shared between individual architectures. The recommended approach how to use a generic header file is to list the file in the Kbuild file. - See "7.2 generic-y" for further info on syntax etc. + See "8.2 generic-y" for further info on syntax etc. 7.11 Post-link pass ------------------- @@ -1601,4 +1601,4 @@ is the right choice. - Describe how kbuild supports shipped files with _shipped. - Generating offset header files. -- Add more variables to section 7? +- Add more variables to chapters 7 or 9? diff --git a/Documentation/locking/locktypes.rst b/Documentation/locking/locktypes.rst index 4cefed8048ca42c473148c113529c11ffbb5d9ba..ddada4a53749364b86a9d11f11d2dcf4b7470ca6 100644 --- a/Documentation/locking/locktypes.rst +++ b/Documentation/locking/locktypes.rst @@ -164,14 +164,14 @@ by disabling preemption or interrupts. On non-PREEMPT_RT kernels local_lock operations map to the preemption and interrupt disabling and enabling primitives: - =========================== ====================== - local_lock(&llock) preempt_disable() - local_unlock(&llock) preempt_enable() - local_lock_irq(&llock) local_irq_disable() - local_unlock_irq(&llock) local_irq_enable() - local_lock_save(&llock) local_irq_save() - local_lock_restore(&llock) local_irq_save() - =========================== ====================== + =============================== ====================== + local_lock(&llock) preempt_disable() + local_unlock(&llock) preempt_enable() + local_lock_irq(&llock) local_irq_disable() + local_unlock_irq(&llock) local_irq_enable() + local_lock_irqsave(&llock) local_irq_save() + local_unlock_irqrestore(&llock) local_irq_restore() + =============================== ====================== The named scope of local_lock has two advantages over the regular primitives: @@ -353,14 +353,14 @@ protection scope. So the following substitution is wrong:: { local_irq_save(flags); -> local_lock_irqsave(&local_lock_1, flags); func3(); - local_irq_restore(flags); -> local_lock_irqrestore(&local_lock_1, flags); + local_irq_restore(flags); -> local_unlock_irqrestore(&local_lock_1, flags); } func2() { local_irq_save(flags); -> local_lock_irqsave(&local_lock_2, flags); func3(); - local_irq_restore(flags); -> local_lock_irqrestore(&local_lock_2, flags); + local_irq_restore(flags); -> local_unlock_irqrestore(&local_lock_2, flags); } func3() @@ -379,14 +379,14 @@ PREEMPT_RT-specific semantics of spinlock_t. 
The correct substitution is:: { local_irq_save(flags); -> local_lock_irqsave(&local_lock, flags); func3(); - local_irq_restore(flags); -> local_lock_irqrestore(&local_lock, flags); + local_irq_restore(flags); -> local_unlock_irqrestore(&local_lock, flags); } func2() { local_irq_save(flags); -> local_lock_irqsave(&local_lock, flags); func3(); - local_irq_restore(flags); -> local_lock_irqrestore(&local_lock, flags); + local_irq_restore(flags); -> local_unlock_irqrestore(&local_lock, flags); } func3() diff --git a/Documentation/maintainer/maintainer-entry-profile.rst b/Documentation/maintainer/maintainer-entry-profile.rst index 227f427118e816896d0407f4178673dbc884f9f8..b7a627d6c97dc672c7060c30d632c8c1384d1b01 100644 --- a/Documentation/maintainer/maintainer-entry-profile.rst +++ b/Documentation/maintainer/maintainer-entry-profile.rst @@ -101,3 +101,4 @@ to do something different in the near future. ../doc-guide/maintainer-profile ../nvdimm/maintainer-entry-profile + ../riscv/patch-acceptance diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst index 24168b0d16bd2d76320a5cb23b18ba3281e0777a..adc314639085b273d9e99dd3fab97f387e4bffaf 100644 --- a/Documentation/networking/bonding.rst +++ b/Documentation/networking/bonding.rst @@ -2860,17 +2860,6 @@ version of the linux kernel, found on http://kernel.org The latest version of this document can be found in the latest kernel source (named Documentation/networking/bonding.rst). -Discussions regarding the usage of the bonding driver take place on the -bonding-devel mailing list, hosted at sourceforge.net. If you have questions or -problems, post them to the list. The list address is: - -bonding-devel@lists.sourceforge.net - -The administrative interface (to subscribe or unsubscribe) can -be found at: - -https://lists.sourceforge.net/lists/listinfo/bonding-devel - Discussions regarding the development of the bonding driver take place on the main Linux network mailing list, hosted at vger.kernel.org. The list address is: @@ -2881,10 +2870,3 @@ The administrative interface (to subscribe or unsubscribe) can be found at: http://vger.kernel.org/vger-lists.html#netdev - -Donald Becker's Ethernet Drivers and diag programs may be found at : - - - http://web.archive.org/web/%2E/http://www.scyld.com/network/ - -You will also find a lot of information regarding Ethernet, NWay, MII, -etc. at www.scyld.com. diff --git a/Documentation/networking/dsa/configuration.rst b/Documentation/networking/dsa/configuration.rst index af029b3ca2abe58ba54bff2fdbc7363248fa6a36..11bd5e6108c0049ce307b8d7f843c98dfe91571d 100644 --- a/Documentation/networking/dsa/configuration.rst +++ b/Documentation/networking/dsa/configuration.rst @@ -180,7 +180,7 @@ The configuration can only be set up via VLAN tagging and bridge setup. 
# bring up the slave interfaces ip link set lan1 up - ip link set lan1 up + ip link set lan2 up ip link set lan3 up # create bridge diff --git a/Documentation/powerpc/syscall64-abi.rst b/Documentation/powerpc/syscall64-abi.rst index 46caaadbb0290fe66eed0081ef8a62c5584ec695..379817ca64d2b729e36e8de4ba518df26f6cec23 100644 --- a/Documentation/powerpc/syscall64-abi.rst +++ b/Documentation/powerpc/syscall64-abi.rst @@ -49,16 +49,18 @@ Register preservation rules Register preservation rules match the ELF ABI calling sequence with the following differences: -=========== ============= ======================================== --- For the sc instruction, differences with the ELF ABI --- +=========== ============= ======================================== r0 Volatile (System call number.) r3 Volatile (Parameter 1, and return value.) r4-r8 Volatile (Parameters 2-6.) cr0 Volatile (cr0.SO is the return error condition.) cr1, cr5-7 Nonvolatile lr Nonvolatile +=========== ============= ======================================== --- For the scv 0 instruction, differences with the ELF ABI --- +=========== ============= ======================================== r0 Volatile (System call number.) r3 Volatile (Parameter 1, and return value.) r4-r8 Volatile (Parameters 2-6.) diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst index 4a9aa4f0681e5de1b85e1000b54f3aee87f446e1..918e32d76fc44e42cbd76d0df473e144a04ff62e 100644 --- a/Documentation/process/deprecated.rst +++ b/Documentation/process/deprecated.rst @@ -142,7 +142,7 @@ only NUL-terminated strings. The safe replacement is strscpy(). (Users of strscpy() still needing NUL-padding should instead use strscpy_pad().) -If a caller is using non-NUL-terminated strings, strncpy()() can +If a caller is using non-NUL-terminated strings, strncpy() can still be used, but destinations should be marked with the `__nonstring `_ attribute to avoid future compiler warnings. diff --git a/Documentation/sound/cards/audigy-mixer.rst b/Documentation/sound/cards/audigy-mixer.rst index 998f76e19cdd69788701ba6a1cb2a359268e396a..f3f4640ee2afc6242b1dbe6a47253530ad8a4358 100644 --- a/Documentation/sound/cards/audigy-mixer.rst +++ b/Documentation/sound/cards/audigy-mixer.rst @@ -332,7 +332,7 @@ WO 9901953 (A1) US Patents (https://www.uspto.gov/) ----------------------------------- +----------------------------------- US 5925841 Digital Sampling Instrument employing cache memory (Jul. 20, 1999) diff --git a/Documentation/sound/cards/sb-live-mixer.rst b/Documentation/sound/cards/sb-live-mixer.rst index eccb0f0ffd0ff865c216bf584ed3e2c4471936f4..2ce41d3822d8b9b148d00df721ba991527f49397 100644 --- a/Documentation/sound/cards/sb-live-mixer.rst +++ b/Documentation/sound/cards/sb-live-mixer.rst @@ -337,7 +337,7 @@ WO 9901953 (A1) US Patents (https://www.uspto.gov/) ----------------------------------- +----------------------------------- US 5925841 Digital Sampling Instrument employing cache memory (Jul. 20, 1999) diff --git a/Documentation/sound/designs/timestamping.rst b/Documentation/sound/designs/timestamping.rst index 2b0fff503415199d93092152b08c45cb5a796174..7c7ecf5dbc4bd75be49d0ed5953fb16376c3aa4a 100644 --- a/Documentation/sound/designs/timestamping.rst +++ b/Documentation/sound/designs/timestamping.rst @@ -143,7 +143,7 @@ timestamp shows when the information is put together by the driver before returning from the ``STATUS`` and ``STATUS_EXT`` ioctl. in most cases this driver_timestamp will be identical to the regular system tstamp. 
-Examples of typestamping with HDaudio: +Examples of timestamping with HDAudio: 1. DMA timestamp, no compensation for DMA+analog delay :: diff --git a/Documentation/translations/it_IT/process/deprecated.rst b/Documentation/translations/it_IT/process/deprecated.rst index e108eaf82cf6719ddc02817c6bb83c7907fd54bd..a642ff3fdc8bbdfee5e7720891140dd69e90e411 100644 --- a/Documentation/translations/it_IT/process/deprecated.rst +++ b/Documentation/translations/it_IT/process/deprecated.rst @@ -130,7 +130,7 @@ chi usa solo stringe terminate. La versione sicura da usare è strscpy(). (chi usa strscpy() e necessita di estendere la terminazione con NUL deve aggiungere una chiamata a memset()) -Se il chiamate no usa stringhe terminate con NUL, allore strncpy()() +Se il chiamate no usa stringhe terminate con NUL, allore strncpy() può continuare ad essere usata, ma i buffer di destinazione devono essere marchiati con l'attributo `__nonstring `_ per evitare avvisi durante la compilazione. diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index eb3a1316f03ec91fc1a310b29ad2191493bf102f..d2b733dc7892cb515c17d2640dee38081ee36591 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6130,7 +6130,7 @@ HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx. 8.21 KVM_CAP_HYPERV_DIRECT_TLBFLUSH ----------------------------------- -:Architecture: x86 +:Architectures: x86 This capability indicates that KVM running on top of Hyper-V hypervisor enables Direct TLB flush for its guests meaning that TLB flush @@ -6143,19 +6143,33 @@ in CPUID and only exposes Hyper-V identification. In this case, guest thinks it's running on Hyper-V and only use Hyper-V hypercalls. 8.22 KVM_CAP_S390_VCPU_RESETS +----------------------------- -Architectures: s390 +:Architectures: s390 This capability indicates that the KVM_S390_NORMAL_RESET and KVM_S390_CLEAR_RESET ioctls are available. 8.23 KVM_CAP_S390_PROTECTED +--------------------------- -Architecture: s390 - +:Architectures: s390 This capability indicates that the Ultravisor has been initialized and KVM can therefore start protected VMs. This capability governs the KVM_S390_PV_COMMAND ioctl and the KVM_MP_STATE_LOAD MP_STATE. KVM_SET_MP_STATE can fail for protected guests when the state change is invalid. + +8.24 KVM_CAP_STEAL_TIME +----------------------- + +:Architectures: arm64, x86 + +This capability indicates that KVM supports steal time accounting. +When steal time accounting is supported it may be enabled with +architecture-specific interfaces. This capability and the architecture- +specific interfaces must be consistent, i.e. if one says the feature +is supported, then the other should as well and vice versa. For arm64 +see Documentation/virt/kvm/devices/vcpu.rst "KVM_ARM_VCPU_PVTIME_CTRL". +For x86 see Documentation/virt/kvm/msr.rst "MSR_KVM_STEAL_TIME". 
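For completeness, a user-space sketch of how a VMM would probe this capability before relying on steal time accounting; it assumes uapi headers new enough to define KVM_CAP_STEAL_TIME::

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);

            if (kvm < 0)
                    return 1;

            /* KVM_CHECK_EXTENSION returns a positive value when the
             * capability is available on this host. */
            if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_STEAL_TIME) > 0)
                    printf("steal time accounting supported\n");

            return 0;
    }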
diff --git a/MAINTAINERS b/MAINTAINERS index 557d79a5b9f139fdcab6a15fa943b26cf3fe6ce6..c4e8bd1ab7616868959983435148a93e6bd33026 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1694,7 +1694,6 @@ F: arch/arm/mach-cns3xxx/ ARM/CAVIUM THUNDER NETWORK DRIVER M: Sunil Goutham -M: Robert Richter L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: drivers/net/ethernet/cavium/thunder/ @@ -3205,6 +3204,7 @@ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git F: block/ F: drivers/block/ +F: include/linux/blk* F: kernel/trace/blktrace.c F: lib/sbitmap.c @@ -3388,6 +3388,7 @@ M: Florian Fainelli L: netdev@vger.kernel.org L: openwrt-devel@lists.openwrt.org (subscribers-only) S: Supported +F: Documentation/devicetree/bindings/net/dsa/b53.txt F: drivers/net/dsa/b53/* F: include/linux/platform_data/b53.h @@ -3573,13 +3574,28 @@ L: bcm-kernel-feedback-list@broadcom.com S: Maintained F: drivers/phy/broadcom/phy-brcm-usb* +BROADCOM ETHERNET PHY DRIVERS +M: Florian Fainelli +L: bcm-kernel-feedback-list@broadcom.com +L: netdev@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt +F: drivers/net/phy/bcm*.[ch] +F: drivers/net/phy/broadcom.c +F: include/linux/brcmphy.h + BROADCOM GENET ETHERNET DRIVER M: Doug Berger M: Florian Fainelli L: bcm-kernel-feedback-list@broadcom.com L: netdev@vger.kernel.org S: Supported +F: Documentation/devicetree/bindings/net/brcm,bcmgenet.txt +F: Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt F: drivers/net/ethernet/broadcom/genet/ +F: drivers/net/mdio/mdio-bcm-unimac.c +F: include/linux/platform_data/bcmgenet.h +F: include/linux/platform_data/mdio-bcm-unimac.h BROADCOM IPROC ARM ARCHITECTURE M: Ray Jui @@ -3931,8 +3947,8 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/carl9170 F: drivers/net/wireless/ath/carl9170/ CAVIUM I2C DRIVER -M: Robert Richter -S: Supported +M: Robert Richter +S: Odd Fixes W: http://www.marvell.com F: drivers/i2c/busses/i2c-octeon* F: drivers/i2c/busses/i2c-thunderx* @@ -3947,8 +3963,8 @@ W: http://www.marvell.com F: drivers/net/ethernet/cavium/liquidio/ CAVIUM MMC DRIVER -M: Robert Richter -S: Supported +M: Robert Richter +S: Odd Fixes W: http://www.marvell.com F: drivers/mmc/host/cavium* @@ -3960,9 +3976,9 @@ W: http://www.marvell.com F: drivers/crypto/cavium/cpt/ CAVIUM THUNDERX2 ARM64 SOC -M: Robert Richter +M: Robert Richter L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Maintained +S: Odd Fixes F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt F: arch/arm64/boot/dts/cavium/thunder2-99xx* @@ -4241,6 +4257,8 @@ S: Maintained F: .clang-format CLANG/LLVM BUILD SUPPORT +M: Nathan Chancellor +M: Nick Desaulniers L: clang-built-linux@googlegroups.com S: Supported W: https://clangbuiltlinux.github.io/ @@ -5050,7 +5068,7 @@ F: include/linux/dm-*.h F: include/uapi/linux/dm-*.h DEVLINK -M: Jiri Pirko +M: Jiri Pirko L: netdev@vger.kernel.org S: Supported F: Documentation/networking/devlink @@ -5239,6 +5257,7 @@ DOCUMENTATION M: Jonathan Corbet L: linux-doc@vger.kernel.org S: Maintained +P: Documentation/doc-guide/maintainer-profile.rst T: git git://git.lwn.net/linux.git docs-next F: Documentation/ F: scripts/documentation-file-ref-check @@ -6089,7 +6108,7 @@ F: include/linux/dynamic_debug.h F: lib/dynamic_debug.c DYNAMIC INTERRUPT MODERATION -M: Tal Gilboa +M: Tal Gilboa S: Maintained F: Documentation/networking/net_dim.rst F: include/linux/dim.h @@ -6169,7 +6188,7 @@ F: 
Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt F: drivers/edac/aspeed_edac.c EDAC-BLUEFIELD -M: Shravan Kumar Ramani +M: Shravan Kumar Ramani S: Supported F: drivers/edac/bluefield_edac.c @@ -6181,16 +6200,15 @@ F: drivers/edac/highbank* EDAC-CAVIUM OCTEON M: Ralf Baechle -M: Robert Richter L: linux-edac@vger.kernel.org L: linux-mips@vger.kernel.org S: Supported F: drivers/edac/octeon_edac* EDAC-CAVIUM THUNDERX -M: Robert Richter +M: Robert Richter L: linux-edac@vger.kernel.org -S: Supported +S: Odd Fixes F: drivers/edac/thunderx_edac* EDAC-CORE @@ -6198,7 +6216,7 @@ M: Borislav Petkov M: Mauro Carvalho Chehab M: Tony Luck R: James Morse -R: Robert Richter +R: Robert Richter L: linux-edac@vger.kernel.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras.git edac-for-next @@ -6491,8 +6509,8 @@ S: Odd Fixes F: drivers/net/ethernet/agere/ ETHERNET BRIDGE -M: Roopa Prabhu -M: Nikolay Aleksandrov +M: Roopa Prabhu +M: Nikolay Aleksandrov L: bridge@lists.linux-foundation.org (moderated for non-subscribers) L: netdev@vger.kernel.org S: Maintained @@ -6502,7 +6520,6 @@ F: net/bridge/ ETHERNET PHY LIBRARY M: Andrew Lunn -M: Florian Fainelli M: Heiner Kallweit R: Russell King L: netdev@vger.kernel.org @@ -6607,7 +6624,7 @@ F: drivers/iommu/exynos-iommu.c EZchip NPS platform support M: Vineet Gupta -M: Ofer Levi +M: Ofer Levi S: Supported F: arch/arc/boot/dts/eznps.dts F: arch/arc/plat-eznps @@ -6892,6 +6909,14 @@ L: linuxppc-dev@lists.ozlabs.org S: Maintained F: drivers/dma/fsldma.* +FREESCALE DSPI DRIVER +M: Vladimir Oltean +L: linux-spi@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt +F: drivers/spi/spi-fsl-dspi.c +F: include/linux/spi/spi-fsl-dspi.h + FREESCALE ENETC ETHERNET DRIVERS M: Claudiu Manoil L: netdev@vger.kernel.org @@ -8263,7 +8288,7 @@ IA64 (Itanium) PLATFORM M: Tony Luck M: Fenghua Yu L: linux-ia64@vger.kernel.org -S: Maintained +S: Odd Fixes T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git F: Documentation/ia64/ F: arch/ia64/ @@ -8571,7 +8596,7 @@ F: drivers/iio/pressure/dps310.c INFINIBAND SUBSYSTEM M: Doug Ledford -M: Jason Gunthorpe +M: Jason Gunthorpe L: linux-rdma@vger.kernel.org S: Supported W: https://github.com/linux-rdma/rdma-core @@ -9234,7 +9259,7 @@ F: drivers/firmware/iscsi_ibft* ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR M: Sagi Grimberg -M: Max Gurtovoy +M: Max Gurtovoy L: linux-rdma@vger.kernel.org S: Supported W: http://www.openfabrics.org @@ -9783,7 +9808,7 @@ F: drivers/scsi/53c700* LEAKING_ADDRESSES M: Tobin C. 
Harding -M: Tycho Andersen +M: Tycho Andersen L: kernel-hardening@lists.openwall.com S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tobin/leaks.git @@ -11080,7 +11105,7 @@ F: Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt F: drivers/input/touchscreen/melfas_mip4.c MELLANOX ETHERNET DRIVER (mlx4_en) -M: Tariq Toukan +M: Tariq Toukan L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com @@ -11088,7 +11113,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlx4/en_* MELLANOX ETHERNET DRIVER (mlx5e) -M: Saeed Mahameed +M: Saeed Mahameed L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com @@ -11096,7 +11121,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlx5/core/en_* MELLANOX ETHERNET INNOVA DRIVERS -R: Boris Pismenny +R: Boris Pismenny L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com @@ -11107,8 +11132,8 @@ F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* F: include/linux/mlx5/mlx5_ifc_fpga.h MELLANOX ETHERNET SWITCH DRIVERS -M: Jiri Pirko -M: Ido Schimmel +M: Jiri Pirko +M: Ido Schimmel L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com @@ -11117,7 +11142,7 @@ F: drivers/net/ethernet/mellanox/mlxsw/ F: tools/testing/selftests/drivers/net/mlxsw/ MELLANOX FIRMWARE FLASH LIBRARY (mlxfw) -M: mlxsw@mellanox.com +M: mlxsw@nvidia.com L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com @@ -11127,7 +11152,7 @@ F: drivers/net/ethernet/mellanox/mlxfw/ MELLANOX HARDWARE PLATFORM SUPPORT M: Andy Shevchenko M: Darren Hart -M: Vadim Pasternak +M: Vadim Pasternak L: platform-driver-x86@vger.kernel.org S: Supported F: Documentation/ABI/testing/sysfs-platform-mellanox-bootctl @@ -11135,7 +11160,7 @@ F: drivers/platform/mellanox/ F: include/linux/platform_data/mlxreg.h MELLANOX MLX4 core VPI driver -M: Tariq Toukan +M: Tariq Toukan L: netdev@vger.kernel.org L: linux-rdma@vger.kernel.org S: Supported @@ -11145,7 +11170,7 @@ F: drivers/net/ethernet/mellanox/mlx4/ F: include/linux/mlx4/ MELLANOX MLX4 IB driver -M: Yishai Hadas +M: Yishai Hadas L: linux-rdma@vger.kernel.org S: Supported W: http://www.mellanox.com @@ -11155,8 +11180,8 @@ F: include/linux/mlx4/ F: include/uapi/rdma/mlx4-abi.h MELLANOX MLX5 core VPI driver -M: Saeed Mahameed -M: Leon Romanovsky +M: Saeed Mahameed +M: Leon Romanovsky L: netdev@vger.kernel.org L: linux-rdma@vger.kernel.org S: Supported @@ -11167,7 +11192,7 @@ F: drivers/net/ethernet/mellanox/mlx5/core/ F: include/linux/mlx5/ MELLANOX MLX5 IB driver -M: Leon Romanovsky +M: Leon Romanovsky L: linux-rdma@vger.kernel.org S: Supported W: http://www.mellanox.com @@ -11177,8 +11202,8 @@ F: include/linux/mlx5/ F: include/uapi/rdma/mlx5-abi.h MELLANOX MLXCPLD I2C AND MUX DRIVER -M: Vadim Pasternak -M: Michael Shych +M: Vadim Pasternak +M: Michael Shych L: linux-i2c@vger.kernel.org S: Supported F: Documentation/i2c/busses/i2c-mlxcpld.rst @@ -11186,7 +11211,7 @@ F: drivers/i2c/busses/i2c-mlxcpld.c F: drivers/i2c/muxes/i2c-mux-mlxcpld.c MELLANOX MLXCPLD LED DRIVER -M: Vadim Pasternak +M: Vadim Pasternak L: linux-leds@vger.kernel.org S: Supported F: Documentation/leds/leds-mlxcpld.rst @@ -11194,7 +11219,7 @@ F: drivers/leds/leds-mlxcpld.c F: drivers/leds/leds-mlxreg.c MELLANOX PLATFORM DRIVER -M: Vadim Pasternak +M: Vadim Pasternak L: platform-driver-x86@vger.kernel.org S: Supported F: drivers/platform/x86/mlx-platform.c @@ -12175,8 +12200,8 @@ F: net/ipv6/syncookies.c F: net/ipv6/tcp*.c NETWORKING [TLS] 
-M: Boris Pismenny -M: Aviad Yehezkel +M: Boris Pismenny +M: Aviad Yehezkel M: John Fastabend M: Daniel Borkmann M: Jakub Kicinski @@ -12484,7 +12509,7 @@ S: Supported F: drivers/nfc/nxp-nci OBJAGG -M: Jiri Pirko +M: Jiri Pirko L: netdev@vger.kernel.org S: Supported F: include/linux/objagg.h @@ -13126,7 +13151,7 @@ F: drivers/video/logo/logo_parisc* F: include/linux/hp_sdc.h PARMAN -M: Jiri Pirko +M: Jiri Pirko L: netdev@vger.kernel.org S: Supported F: include/linux/parman.h @@ -13445,10 +13470,10 @@ F: Documentation/devicetree/bindings/pci/axis,artpec* F: drivers/pci/controller/dwc/*artpec* PCIE DRIVER FOR CAVIUM THUNDERX -M: Robert Richter +M: Robert Richter L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Supported +S: Odd Fixes F: drivers/pci/controller/pci-thunder-* PCIE DRIVER FOR HISILICON @@ -13585,12 +13610,18 @@ F: kernel/events/* F: tools/lib/perf/ F: tools/perf/ -PERFORMANCE EVENTS SUBSYSTEM ARM64 PMU EVENTS +PERFORMANCE EVENTS TOOLING ARM64 R: John Garry R: Will Deacon +R: Mathieu Poirier +R: Leo Yan L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported +F: tools/build/feature/test-libopencsd.c +F: tools/perf/arch/arm*/ F: tools/perf/pmu-events/arch/arm64/ +F: tools/perf/util/arm-spe* +F: tools/perf/util/cs-etm* PERSONALITY HANDLING M: Christoph Hellwig @@ -14381,7 +14412,7 @@ M: Rob Clark L: iommu@lists.linux-foundation.org L: linux-arm-msm@vger.kernel.org S: Maintained -F: drivers/iommu/qcom_iommu.c +F: drivers/iommu/arm/arm-smmu/qcom_iommu.c QUALCOMM IPCC MAILBOX DRIVER M: Manivannan Sadhasivam @@ -15562,6 +15593,7 @@ F: include/uapi/linux/sed* SECURITY CONTACT M: Security Officers S: Supported +F: Documentation/admin-guide/security-bugs.rst SECURITY SUBSYSTEM M: James Morris @@ -16050,7 +16082,7 @@ F: drivers/infiniband/sw/siw/ F: include/uapi/rdma/siw-abi.h SOFT-ROCE DRIVER (rxe) -M: Zhu Yanjun +M: Zhu Yanjun L: linux-rdma@vger.kernel.org S: Supported F: drivers/infiniband/sw/rxe/ @@ -17132,8 +17164,8 @@ S: Maintained F: Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml F: Documentation/devicetree/bindings/arm/keystone/ti,sci.txt F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt -F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt -F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt +F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml +F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml F: Documentation/devicetree/bindings/reset/ti,sci-reset.txt F: Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt F: drivers/clk/keystone/sci-clk.c @@ -17230,8 +17262,8 @@ S: Maintained F: drivers/net/thunderbolt.c THUNDERX GPIO DRIVER -M: Robert Richter -S: Maintained +M: Robert Richter +S: Odd Fixes F: drivers/gpio/gpio-thunderx.c TI AM437X VPFE DRIVER @@ -18890,6 +18922,15 @@ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core F: arch/x86/platform +X86 PLATFORM UV HPE SUPERDOME FLEX +M: Steve Wahl +R: Dimitri Sivanich +R: Russ Anderson +S: Supported +F: arch/x86/include/asm/uv/ +F: arch/x86/kernel/apic/x2apic_uv_x.c +F: arch/x86/platform/uv/ + X86 VDSO M: Andy Lutomirski L: linux-kernel@vger.kernel.org diff --git a/Makefile b/Makefile index 9cac6fde347956c9a2ab08f6297b555be82cf898..19d012810fbbca2242460aeabad6ba5ed5c380ff 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 9 SUBLEVEL = 0 -EXTRAVERSION = -rc1 
+EXTRAVERSION = -rc5 NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -265,8 +265,7 @@ no-dot-config-targets := $(clean-targets) \ $(version_h) headers headers_% archheaders archscripts \ %asm-generic kernelversion %src-pkg dt_binding_check \ outputmakefile -no-sync-config-targets := $(no-dot-config-targets) install %install \ - kernelrelease +no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/ config-build := @@ -292,7 +291,7 @@ ifneq ($(KBUILD_EXTMOD),) endif ifeq ($(KBUILD_EXTMOD),) - ifneq ($(filter config %config,$(MAKECMDGOALS)),) + ifneq ($(filter %config,$(MAKECMDGOALS)),) config-build := 1 ifneq ($(words $(MAKECMDGOALS)),1) mixed-build := 1 @@ -883,10 +882,6 @@ KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections LDFLAGS_vmlinux += --gc-sections endif -ifdef CONFIG_LIVEPATCH -KBUILD_CFLAGS += $(call cc-option, -flive-patching=inline-clone) -endif - ifdef CONFIG_SHADOW_CALL_STACK CC_FLAGS_SCS := -fsanitize=shadow-call-stack KBUILD_CFLAGS += $(CC_FLAGS_SCS) diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c index ac110ae8f978032b6ad837663ab3b3f83c0d3b5d..5b60c248de9eae6d762937f3eb570fbfeaeb5b65 100644 --- a/arch/alpha/kernel/module.c +++ b/arch/alpha/kernel/module.c @@ -212,7 +212,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, STO_ALPHA_STD_GPLOAD) /* Omit the prologue. */ value += 8; - /* FALLTHRU */ + fallthrough; case R_ALPHA_BRADDR: value -= (u64)location + 4; if (value & 3) diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index a813020d2f1170b3daba65ccc270718d5ea24b84..15bc9d1e79f4d70cead96ffed802c879499efea1 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -453,7 +453,7 @@ syscall_restart(unsigned long r0, unsigned long r19, regs->r0 = EINTR; break; } - /* fallthrough */ + fallthrough; case ERESTARTNOINTR: regs->r0 = r0; /* reset v0 and a3 and replay syscall */ regs->r19 = r19; diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 49754e07e04f47e596ad0662b484283b974cb786..921d4b6e4d956185e6dbb7c3dd06f18f47b84bd7 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -883,7 +883,7 @@ do_entUnaUser(void __user * va, unsigned long opcode, case 0x26: /* sts */ fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg)); - /* FALLTHRU */ + fallthrough; case 0x2c: /* stl */ __asm__ __volatile__( @@ -911,7 +911,7 @@ do_entUnaUser(void __user * va, unsigned long opcode, case 0x27: /* stt */ fake_reg = alpha_read_fp_reg(reg); - /* FALLTHRU */ + fallthrough; case 0x2d: /* stq */ __asm__ __volatile__( diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index 9acbeba832c0b532ca1c4a2623f215335edcd481..dcaa44e408ace2adc3bdad5c8545ccaf046d55f9 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts @@ -88,6 +88,8 @@ idu_intc: idu-interrupt-controller { arcpct: pct { compatible = "snps,archs-pct"; + interrupt-parent = <&cpu_intc>; + interrupts = <20>; }; /* TIMER0 with interrupt for clockevent */ @@ -208,7 +210,7 @@ gmac: ethernet@8000 { reg = <0x8000 0x2000>; interrupts = <10>; interrupt-names = "macirq"; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; snps,pbl = <32>; snps,multicast-filter-bins = <256>; clocks = <&gmacclk>; @@ -226,7 +228,7 @@ mdio { #address-cells = <1>; #size-cells = <0>; compatible = "snps,dwmac-mdio"; - phy0: ethernet-phy@0 { + phy0: ethernet-phy@0 { /* Micrel KSZ9031 */ reg = <0>; }; }; diff --git 
a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h index b747f2ec2928770e8e1a023841f3fc7e75975b04..6147db9252487f76875afbc7953a3c69b518aac1 100644 --- a/arch/arc/include/asm/pgalloc.h +++ b/arch/arc/include/asm/pgalloc.h @@ -18,10 +18,10 @@ * vineetg: April 2010 * -Switched pgtable_t from being struct page * to unsigned long * =Needed so that Page Table allocator (pte_alloc_one) is not forced to - * to deal with struct page. Thay way in future we can make it allocate + * deal with struct page. That way in future we can make it allocate * multiple PG Tbls in one Page Frame * =sweet side effect is avoiding calls to ugly page_address( ) from the - * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate + * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate) * * Amit Bhor, Sameer Dhavale: Codito Technologies 2004 */ diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c index d04837d91b407797d2881cff37c2cd00b08e0487..03f8b1be0c3a82ba27b54e0ba96243a4766f8b66 100644 --- a/arch/arc/kernel/disasm.c +++ b/arch/arc/kernel/disasm.c @@ -339,7 +339,7 @@ void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state, case op_LDWX_S: /* LDWX_S c, [b, u6] */ state->x = 1; - /* intentional fall-through */ + fallthrough; case op_LDW_S: /* LDW_S c, [b, u6] */ state->zz = 2; diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 661fd842ea97db7ad4f3871b2a1038d715a44130..79849f37e782c696528055b3f1ff852f3bceb08d 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c @@ -562,7 +562,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev) { struct arc_reg_pct_build pct_bcr; struct arc_reg_cc_build cc_bcr; - int i, has_interrupts; + int i, has_interrupts, irq; int counter_size; /* in bits */ union cc_name { @@ -637,13 +637,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev) .attr_groups = arc_pmu->attr_groups, }; - if (has_interrupts) { - int irq = platform_get_irq(pdev, 0); - - if (irq < 0) { - pr_err("Cannot get IRQ number for the platform\n"); - return -ENODEV; - } + if (has_interrupts && (irq = platform_get_irq(pdev, 0)) >= 0) { arc_pmu->irq = irq; @@ -652,9 +646,9 @@ static int arc_pmu_device_probe(struct platform_device *pdev) this_cpu_ptr(&arc_pmu_cpu)); on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1); - - } else + } else { arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + } /* * perf parser doesn't really like '-' symbol in events name, so let's diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 3d57ed0d8535098d958b03091ba59c271c98f126..8222f8c546902950abd7f4568e785a8de9122e9e 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -321,7 +321,7 @@ static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs) regs->r0 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: /* diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 28e8bf04b253f47be704cc375907d270536ecb94..a331bb5d8319f2b9110a87eb8fc5f0a27b71988c 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -18,44 +18,37 @@ #define ARC_PATH_MAX 256 -/* - * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) - * -Prints 3 regs per line and a CR. 
- * -To continue, callee regs right after scratch, special handling of CR - */ -static noinline void print_reg_file(long *reg_rev, int start_num) +static noinline void print_regs_scratch(struct pt_regs *regs) { - unsigned int i; - char buf[512]; - int n = 0, len = sizeof(buf); - - for (i = start_num; i < start_num + 13; i++) { - n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t", - i, (unsigned long)*reg_rev); - - if (((i + 1) % 3) == 0) - n += scnprintf(buf + n, len - n, "\n"); - - /* because pt_regs has regs reversed: r12..r0, r25..r13 */ - if (is_isa_arcv2() && start_num == 0) - reg_rev++; - else - reg_rev--; - } - - if (start_num != 0) - n += scnprintf(buf + n, len - n, "\n\n"); + pr_cont("BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n", + regs->bta, regs->sp, regs->fp, (void *)regs->blink); + pr_cont("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n", + regs->lp_start, regs->lp_end, regs->lp_count); - /* To continue printing callee regs on same line as scratch regs */ - if (start_num == 0) - pr_info("%s", buf); - else - pr_cont("%s\n", buf); + pr_info("r00: 0x%08lx\tr01: 0x%08lx\tr02: 0x%08lx\n" \ + "r03: 0x%08lx\tr04: 0x%08lx\tr05: 0x%08lx\n" \ + "r06: 0x%08lx\tr07: 0x%08lx\tr08: 0x%08lx\n" \ + "r09: 0x%08lx\tr10: 0x%08lx\tr11: 0x%08lx\n" \ + "r12: 0x%08lx\t", + regs->r0, regs->r1, regs->r2, + regs->r3, regs->r4, regs->r5, + regs->r6, regs->r7, regs->r8, + regs->r9, regs->r10, regs->r11, + regs->r12); } -static void show_callee_regs(struct callee_regs *cregs) +static void print_regs_callee(struct callee_regs *regs) { - print_reg_file(&(cregs->r13), 13); + pr_cont("r13: 0x%08lx\tr14: 0x%08lx\n" \ + "r15: 0x%08lx\tr16: 0x%08lx\tr17: 0x%08lx\n" \ + "r18: 0x%08lx\tr19: 0x%08lx\tr20: 0x%08lx\n" \ + "r21: 0x%08lx\tr22: 0x%08lx\tr23: 0x%08lx\n" \ + "r24: 0x%08lx\tr25: 0x%08lx\n", + regs->r13, regs->r14, + regs->r15, regs->r16, regs->r17, + regs->r18, regs->r19, regs->r20, + regs->r21, regs->r22, regs->r23, + regs->r24, regs->r25); } static void print_task_path_n_nm(struct task_struct *tsk) @@ -175,7 +168,7 @@ static void show_ecr_verbose(struct pt_regs *regs) void show_regs(struct pt_regs *regs) { struct task_struct *tsk = current; - struct callee_regs *cregs; + struct callee_regs *cregs = (struct callee_regs *)tsk->thread.callee_reg; /* * generic code calls us with preemption disabled, but some calls @@ -204,25 +197,15 @@ void show_regs(struct pt_regs *regs) STS_BIT(regs, A2), STS_BIT(regs, A1), STS_BIT(regs, E2), STS_BIT(regs, E1)); #else - pr_cont(" [%2s%2s%2s%2s]", + pr_cont(" [%2s%2s%2s%2s] ", STS_BIT(regs, IE), (regs->status32 & STATUS_U_MASK) ? 
"U " : "K ", STS_BIT(regs, DE), STS_BIT(regs, AE)); #endif - pr_cont(" BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n", - regs->bta, regs->sp, regs->fp, (void *)regs->blink); - pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n", - regs->lp_start, regs->lp_end, regs->lp_count); - - /* print regs->r0 thru regs->r12 - * Sequential printing was generating horrible code - */ - print_reg_file(&(regs->r0), 0); - /* If Callee regs were saved, display them too */ - cregs = (struct callee_regs *)current->thread.callee_reg; + print_regs_scratch(regs); if (cregs) - show_callee_regs(cregs); + print_regs_callee(cregs); preempt_disable(); } diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index f87758a6851bd132124c88512a9fbccf7870c49d..74ad4256022e4b80ac9b2937a1e86471197d1909 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -572,7 +572,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end, #else BUILD_BUG_ON(sizeof(u32) != sizeof(value)); #endif - /* Fall through */ + fallthrough; case DW_EH_PE_native: if (end < (const void *)(ptr.pul + 1)) return 0; @@ -827,7 +827,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc, case DW_CFA_def_cfa: state->cfa.reg = get_uleb128(&ptr.p8, end); unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg); - /* fall through */ + fallthrough; case DW_CFA_def_cfa_offset: state->cfa.offs = get_uleb128(&ptr.p8, end); unw_debug("cfa_def_cfa_offset: 0x%lx ", @@ -835,7 +835,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc, break; case DW_CFA_def_cfa_sf: state->cfa.reg = get_uleb128(&ptr.p8, end); - /* fall through */ + fallthrough; case DW_CFA_def_cfa_offset_sf: state->cfa.offs = get_sleb128(&ptr.p8, end) * state->dataAlign; diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index f886ac69d8ada101fc6bf14f34f0293cd732a032..3a35b82a718e3e292ee68ede738a2fc501706442 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -26,8 +26,8 @@ static unsigned long low_mem_sz; #ifdef CONFIG_HIGHMEM static unsigned long min_high_pfn, max_high_pfn; -static u64 high_mem_start; -static u64 high_mem_sz; +static phys_addr_t high_mem_start; +static phys_addr_t high_mem_sz; #endif #ifdef CONFIG_DISCONTIGMEM @@ -69,6 +69,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) high_mem_sz = size; in_use = 1; memblock_add_node(base, size, 1); + memblock_reserve(base, size); #endif } @@ -157,7 +158,7 @@ void __init setup_arch_memory(void) min_high_pfn = PFN_DOWN(high_mem_start); max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz); - max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn; + max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn; high_memory = (void *)(min_high_pfn << PAGE_SHIFT); kmap_init(); @@ -166,22 +167,26 @@ void __init setup_arch_memory(void) free_area_init(max_zone_pfn); } -/* - * mem_init - initializes memory - * - * Frees up bootmem - * Calculates and displays memory available/used - */ -void __init mem_init(void) +static void __init highmem_init(void) { #ifdef CONFIG_HIGHMEM unsigned long tmp; - reset_all_zones_managed_pages(); + memblock_free(high_mem_start, high_mem_sz); for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++) free_highmem_page(pfn_to_page(tmp)); #endif +} +/* + * mem_init - initializes memory + * + * Frees up bootmem + * Calculates and displays memory available/used + */ +void __init mem_init(void) +{ memblock_free_all(); + highmem_init(); mem_init_print_info(NULL); } diff --git a/arch/arc/plat-eznps/include/plat/ctop.h 
b/arch/arc/plat-eznps/include/plat/ctop.h index a4a61531c7fb9388e31fbc63b72d559663e1e965..77712c5ffe8482eac5db53249a1c34653d0e48f8 100644 --- a/arch/arc/plat-eznps/include/plat/ctop.h +++ b/arch/arc/plat-eznps/include/plat/ctop.h @@ -33,7 +33,6 @@ #define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C) #define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030) #define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080) -#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088) #define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C) #define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300) diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi index cbebed5f050eb09a29b3134f5ff9848f07ce42b8..e8df458aad392e7223228c9b250d4da9ce0b3099 100644 --- a/arch/arm/boot/dts/bcm-hr2.dtsi +++ b/arch/arm/boot/dts/bcm-hr2.dtsi @@ -217,7 +217,7 @@ rng: rng@33000 { }; qspi: spi@27200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x027200 0x184>, <0x027000 0x124>, <0x11c408 0x004>, diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index 0346ea621f0f930add2149c4b0817052d4a12606..c846fa3c244d36555eead3328d3c81d2042a0039 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -284,7 +284,7 @@ nand: nand@26000 { }; qspi: spi@27200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x027200 0x184>, <0x027000 0x124>, <0x11c408 0x004>, diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 2d9b4dd058307e7f1c5c44bae5e4f2e75d5ea6c3..0016720ce530012119a3e560b6f92db01389c3a6 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -488,7 +488,7 @@ nand: nand@18028000 { }; spi@18029200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x18029200 0x184>, <0x18029000 0x124>, <0x1811b408 0x004>, diff --git a/arch/arm/boot/dts/imx6q-logicpd.dts b/arch/arm/boot/dts/imx6q-logicpd.dts index 7a3d1d3e54a9427c90f1f0846c1234bce93aec1c..8f94364ba484721192383bdd40bb1902f9c02637 100644 --- a/arch/arm/boot/dts/imx6q-logicpd.dts +++ b/arch/arm/boot/dts/imx6q-logicpd.dts @@ -13,7 +13,7 @@ / { backlight: backlight-lvds { compatible = "pwm-backlight"; - pwms = <&pwm3 0 20000>; + pwms = <&pwm3 0 20000 0>; brightness-levels = <0 4 8 16 32 64 128 255>; default-brightness-level = <6>; power-supply = <®_lcd>; diff --git a/arch/arm/boot/dts/imx6q-prtwd2.dts b/arch/arm/boot/dts/imx6q-prtwd2.dts index dffafbcaa7af337eb220af2edecb337362dbab4f..349959d38020066b3028bef12fe601c81041b3a2 100644 --- a/arch/arm/boot/dts/imx6q-prtwd2.dts +++ b/arch/arm/boot/dts/imx6q-prtwd2.dts @@ -30,7 +30,7 @@ usdhc2_wifi_pwrseq: usdhc2_wifi_pwrseq { }; /* PRTWD2 rev 1 bitbang I2C for Ethernet Switch */ - i2c@4 { + i2c { compatible = "i2c-gpio"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c4>; diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi index 7705285d9e3c7de096fab0c696534fa78c384337..4d01c3300b975c5dd1046c4b9feec5a6e24ef753 100644 --- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi @@ -22,8 +22,6 @@ chosen { gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; user-pb { label = "user_pb"; diff --git a/arch/arm/boot/dts/imx6sx-pinfunc.h b/arch/arm/boot/dts/imx6sx-pinfunc.h index 0b02c7e60c174573ffb56792e20aaefe84985ab1..f4dc46207954ce46485ad1164dc5254ae53790d0 100644 --- 
a/arch/arm/boot/dts/imx6sx-pinfunc.h +++ b/arch/arm/boot/dts/imx6sx-pinfunc.h @@ -1026,7 +1026,7 @@ #define MX6SX_PAD_QSPI1B_DQS__SIM_M_HADDR_15 0x01B0 0x04F8 0x0000 0x7 0x0 #define MX6SX_PAD_QSPI1B_SCLK__QSPI1_B_SCLK 0x01B4 0x04FC 0x0000 0x0 0x0 #define MX6SX_PAD_QSPI1B_SCLK__UART3_DCE_RX 0x01B4 0x04FC 0x0840 0x1 0x4 -#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x0 0x0 +#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x1 0x0 #define MX6SX_PAD_QSPI1B_SCLK__ECSPI3_SCLK 0x01B4 0x04FC 0x0730 0x2 0x1 #define MX6SX_PAD_QSPI1B_SCLK__ESAI_RX_HF_CLK 0x01B4 0x04FC 0x0780 0x3 0x2 #define MX6SX_PAD_QSPI1B_SCLK__CSI1_DATA_16 0x01B4 0x04FC 0x06DC 0x4 0x1 diff --git a/arch/arm/boot/dts/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/imx7d-zii-rmu2.dts index e5e20b07f184b320e3c3e2aea2d9428d81ac1eb6..7cb6153fc650b129abb7436c918b5b4db175e5e0 100644 --- a/arch/arm/boot/dts/imx7d-zii-rmu2.dts +++ b/arch/arm/boot/dts/imx7d-zii-rmu2.dts @@ -58,7 +58,7 @@ &fec1 { <&clks IMX7D_ENET1_TIME_ROOT_CLK>; assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>; assigned-clock-rates = <0>, <100000000>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&fec1_phy>; status = "okay"; diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi index 367439639da9193e6f51d364c2b465ab660d2b41..b7ea37ad4e55c45f5615766f3fa30bf7d9eab4a4 100644 --- a/arch/arm/boot/dts/imx7ulp.dtsi +++ b/arch/arm/boot/dts/imx7ulp.dtsi @@ -394,7 +394,7 @@ gpio_ptc: gpio@40ae0000 { clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, <&pcc3 IMX7ULP_CLK_PCTLC>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc1 0 0 32>; + gpio-ranges = <&iomuxc1 0 0 20>; }; gpio_ptd: gpio@40af0000 { @@ -408,7 +408,7 @@ gpio_ptd: gpio@40af0000 { clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, <&pcc3 IMX7ULP_CLK_PCTLD>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc1 0 32 32>; + gpio-ranges = <&iomuxc1 0 32 12>; }; gpio_pte: gpio@40b00000 { @@ -422,7 +422,7 @@ gpio_pte: gpio@40b00000 { clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, <&pcc3 IMX7ULP_CLK_PCTLE>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc1 0 64 32>; + gpio-ranges = <&iomuxc1 0 64 16>; }; gpio_ptf: gpio@40b10000 { @@ -436,7 +436,7 @@ gpio_ptf: gpio@40b10000 { clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, <&pcc3 IMX7ULP_CLK_PCTLF>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc1 0 96 32>; + gpio-ranges = <&iomuxc1 0 96 20>; }; }; diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi index 100396f6c2feb1e15e4fab19667c0b83dda82d23..395e05f10d36ce91ea6fceb2c18c9b2e28caf604 100644 --- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi @@ -51,6 +51,8 @@ &vaux4 { &mcbsp2 { status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&mcbsp2_pins>; }; &charger { @@ -102,35 +104,18 @@ video_reg: video_reg { regulator-max-microvolt = <3300000>; }; - lcd0: display@0 { - compatible = "panel-dpi"; - label = "28"; - status = "okay"; - /* default-on; */ + lcd0: display { + /* This isn't the exact LCD, but the timings meet spec */ + compatible = "logicpd,type28"; pinctrl-names = "default"; pinctrl-0 = <&lcd_enable_pin>; - enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */ + backlight = <&bl>; + enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; port { lcd_in: endpoint { remote-endpoint = <&dpi_out>; }; }; - - panel-timing { - clock-frequency = <9000000>; - hactive = <480>; - vactive = <272>; - hfront-porch = <3>; - hback-porch = <2>; - hsync-len = 
<42>; - vback-porch = <3>; - vfront-porch = <2>; - vsync-len = <11>; - hsync-active = <1>; - vsync-active = <1>; - de-active = <1>; - pixelclk-active = <0>; - }; }; bl: backlight { diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi index 381f0e82bb706cc5b9b8b951dcb4e09cafb93991..b0f6613e6d5498d55c9a2ebc1f10ebda9ceabb2e 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi @@ -81,6 +81,8 @@ &vaux4 { }; &mcbsp2 { + pinctrl-names = "default"; + pinctrl-0 = <&mcbsp2_pins>; status = "okay"; }; diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index 069af9a19bb64456ca28b2d0d2edef04d0d18703..827373ef1a547ef6e5f74bd49787a3237853bc76 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -182,7 +182,7 @@ qspi: spi@1550000 { #address-cells = <1>; #size-cells = <0>; reg = <0x0 0x1550000 0x0 0x10000>, - <0x0 0x40000000 0x0 0x40000000>; + <0x0 0x40000000 0x0 0x20000000>; reg-names = "QuadSPI", "QuadSPI-memory"; interrupts = ; clock-names = "qspi_en", "qspi"; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index 5da9cff7a53c7378f6453a4a2ff112b267207e1e..a82c96258a9336ab5c1895b61857e66d591c9687 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -488,11 +488,11 @@ rfbi: encoder@0 { }; }; - target-module@5000 { + target-module@4000 { compatible = "ti,sysc-omap2", "ti,sysc"; - reg = <0x5000 0x4>, - <0x5010 0x4>, - <0x5014 0x4>; + reg = <0x4000 0x4>, + <0x4010 0x4>, + <0x4014 0x4>; reg-names = "rev", "sysc", "syss"; ti,sysc-sidle = , , @@ -504,7 +504,7 @@ SYSC_OMAP2_SOFTRESET | ti,syss-mask = <1>; #address-cells = <1>; #size-cells = <1>; - ranges = <0 0x5000 0x1000>; + ranges = <0 0x4000 0x1000>; dsi1: encoder@0 { compatible = "ti,omap5-dsi"; @@ -514,8 +514,9 @@ dsi1: encoder@0 { reg-names = "proto", "phy", "pll"; interrupts = ; status = "disabled"; - clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>; - clock-names = "fck"; + clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>, + <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>; + clock-names = "fck", "sys_clk"; }; }; @@ -545,8 +546,9 @@ dsi2: encoder@0 { reg-names = "proto", "phy", "pll"; interrupts = ; status = "disabled"; - clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>; - clock-names = "fck"; + clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>, + <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>; + clock-names = "fck", "sys_clk"; }; }; diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index fc4abef143a066eb86d0d704600c30170faca7c8..0013ec3463c46569750d52d8f0ce6867285d664a 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -821,7 +821,7 @@ timer2: timer2@ffd00000 { timer3: timer3@ffd00100 { compatible = "snps,dw-apb-timer"; interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; - reg = <0xffd01000 0x100>; + reg = <0xffd00100 0x100>; clocks = <&l4_sys_free_clk>; clock-names = "timer"; resets = <&rst L4SYSTIMER1_RESET>; diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi index 0fe03aa0367f236ce964b1438e5c760a233bf3d1..2259d11af721e8d90acea7ca0de3df971b66dd91 100644 --- a/arch/arm/boot/dts/vfxxx.dtsi +++ b/arch/arm/boot/dts/vfxxx.dtsi @@ -495,7 +495,7 @@ edma1: dma-controller@40098000 { }; ocotp: ocotp@400a5000 { - compatible = "fsl,vf610-ocotp"; + compatible = "fsl,vf610-ocotp", "syscon"; reg = <0x400a5000 0x1000>; clocks = <&clks VF610_CLK_OCOTP>; 
}; diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig index a9755c501bec405669b8213f820d58009980a2ac..b06e537d5149031afa1acf974ad53b0dcaa9f77f 100644 --- a/arch/arm/configs/integrator_defconfig +++ b/arch/arm/configs/integrator_defconfig @@ -1,13 +1,11 @@ CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y CONFIG_ARCH_MULTI_V4T=y CONFIG_ARCH_MULTI_V5=y # CONFIG_ARCH_MULTI_V7 is not set @@ -15,19 +13,17 @@ CONFIG_ARCH_INTEGRATOR=y CONFIG_ARCH_INTEGRATOR_AP=y CONFIG_INTEGRATOR_IMPD1=y CONFIG_ARCH_INTEGRATOR_CP=y -CONFIG_PCI=y -CONFIG_PREEMPT=y CONFIG_AEABI=y # CONFIG_ATAGS is not set -CONFIG_ZBOOT_ROM_TEXT=0x0 -CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_CMDLINE="console=ttyAM0,38400n8 root=/dev/nfs ip=bootp" CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y CONFIG_CPUFREQ_DT=y -CONFIG_CMA=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_PARTITION_ADVANCED=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -37,6 +33,7 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y # CONFIG_IPV6 is not set +CONFIG_PCI=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_AFS_PARTS=y @@ -52,9 +49,12 @@ CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_NETDEVICES=y CONFIG_E100=y CONFIG_SMC91X=y +CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_GPIO=y # CONFIG_SERIO_SERPORT is not set CONFIG_DRM=y +CONFIG_DRM_DISPLAY_CONNECTOR=y CONFIG_DRM_SIMPLE_BRIDGE=y CONFIG_DRM_PL111=y CONFIG_FB_MODE_HELPERS=y diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 7fff88e612525cfba94b4987ee526f58285debd5..7a4853b1213a8e3d3f20f23122b7bc0cf6cff05e 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -547,7 +547,7 @@ static int arch_build_bp_info(struct perf_event *bp, if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE) && max_watchpoint_len >= 8) break; - /* Else, fall through */ + fallthrough; default: return -EINVAL; } @@ -612,12 +612,12 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, /* Allow halfword watchpoints and breakpoints. */ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; - /* Else, fall through */ + fallthrough; case 3: /* Allow single byte watchpoint. */ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) break; - /* Else, fall through */ + fallthrough; default: ret = -EINVAL; goto out; @@ -884,7 +884,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, break; case ARM_ENTRY_ASYNC_WATCHPOINT: WARN(1, "Asynchronous watchpoint exception taken. 
Debugging results may be unreliable\n"); - /* Fall through */ + fallthrough; case ARM_ENTRY_SYNC_WATCHPOINT: watchpoint_handler(addr, fsr, regs); break; @@ -933,7 +933,7 @@ static bool core_has_os_save_restore(void) ARM_DBG_READ(c1, c1, 4, oslsr); if (oslsr & ARM_OSLSR_OSLM0) return true; - /* Else, fall through */ + fallthrough; default: return false; } diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index c9dc912b83f0129fc3664b43ccb4bdc56b8be2b8..c1892f733f208da0b2a0a419e296dd2fc0b03675 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -596,7 +596,7 @@ static int do_signal(struct pt_regs *regs, int syscall) switch (retval) { case -ERESTART_RESTARTBLOCK: restart -= 2; - /* Fall through */ + fallthrough; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: diff --git a/arch/arm/mach-ep93xx/crunch.c b/arch/arm/mach-ep93xx/crunch.c index 1c05c5bf7e5c89331f9b72771334b819c4ec8a2c..757032d82f6309834ef0470aa026272a4cb5a165 100644 --- a/arch/arm/mach-ep93xx/crunch.c +++ b/arch/arm/mach-ep93xx/crunch.c @@ -49,7 +49,7 @@ static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t) * FALLTHROUGH: Ensure we don't try to overwrite our newly * initialised state information on the first fault. */ - /* Fall through */ + fallthrough; case THREAD_NOTIFY_EXIT: crunch_task_release(thread); diff --git a/arch/arm/mach-mmp/pm-mmp2.c b/arch/arm/mach-mmp/pm-mmp2.c index 2d86381e152d6604cb9d4cb96e655a14885f8ca0..7a6f74c32d428ff7d0887cd75e6ce02930ce3505 100644 --- a/arch/arm/mach-mmp/pm-mmp2.c +++ b/arch/arm/mach-mmp/pm-mmp2.c @@ -123,19 +123,19 @@ void mmp2_pm_enter_lowpower_mode(int state) case POWER_MODE_SYS_SLEEP: apcr |= MPMU_PCR_PJ_SLPEN; /* set the SLPEN bit */ apcr |= MPMU_PCR_PJ_VCTCXOSD; /* set VCTCXOSD */ - /* fall through */ + fallthrough; case POWER_MODE_CHIP_SLEEP: apcr |= MPMU_PCR_PJ_SLPEN; - /* fall through */ + fallthrough; case POWER_MODE_APPS_SLEEP: apcr |= MPMU_PCR_PJ_APBSD; /* set APBSD */ - /* fall through */ + fallthrough; case POWER_MODE_APPS_IDLE: apcr |= MPMU_PCR_PJ_AXISD; /* set AXISDD bit */ apcr |= MPMU_PCR_PJ_DDRCORSD; /* set DDRCORSD bit */ idle_cfg |= APMU_PJ_IDLE_CFG_PJ_PWRDWN; /* PJ power down */ apcr |= MPMU_PCR_PJ_SPSD; - /* fall through */ + fallthrough; case POWER_MODE_CORE_EXTIDLE: idle_cfg |= APMU_PJ_IDLE_CFG_PJ_IDLE; /* set the IDLE bit */ idle_cfg &= ~APMU_PJ_IDLE_CFG_ISO_MODE_CNTRL_MASK; diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c index 69ebe18ff209fa39d72d06391ee56240ed320fad..1d71d73c18620608ca6d47ab96f785a8fef77399 100644 --- a/arch/arm/mach-mmp/pm-pxa910.c +++ b/arch/arm/mach-mmp/pm-pxa910.c @@ -145,23 +145,23 @@ void pxa910_pm_enter_lowpower_mode(int state) case POWER_MODE_UDR: /* only shutdown APB in UDR */ apcr |= MPMU_APCR_STBYEN | MPMU_APCR_APBSD; - /* fall through */ + fallthrough; case POWER_MODE_SYS_SLEEP: apcr |= MPMU_APCR_SLPEN; /* set the SLPEN bit */ apcr |= MPMU_APCR_VCTCXOSD; /* set VCTCXOSD */ - /* fall through */ + fallthrough; case POWER_MODE_APPS_SLEEP: apcr |= MPMU_APCR_DDRCORSD; /* set DDRCORSD */ - /* fall through */ + fallthrough; case POWER_MODE_APPS_IDLE: apcr |= MPMU_APCR_AXISD; /* set AXISDD bit */ - /* fall through */ + fallthrough; case POWER_MODE_CORE_EXTIDLE: idle_cfg |= APMU_MOH_IDLE_CFG_MOH_IDLE; idle_cfg |= APMU_MOH_IDLE_CFG_MOH_PWRDWN; idle_cfg |= APMU_MOH_IDLE_CFG_MOH_PWR_SW(3) | APMU_MOH_IDLE_CFG_MOH_L2_PWR_SW(3); - /* fall through */ + fallthrough; case POWER_MODE_CORE_INTIDLE: break; } diff --git a/arch/arm/mach-omap2/id.c 
b/arch/arm/mach-omap2/id.c index 1d119b974f5ff951c40b5975eac5583dfadf61c0..59755b5a1ad7a5d996abd9feb607cfb0cc4f5012 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -396,7 +396,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "3.1"; break; case 7: - /* FALLTHROUGH */ default: /* Use the latest known revision as default */ omap_revision = OMAP3430_REV_ES3_1_2; @@ -416,7 +415,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "1.0"; break; case 1: - /* FALLTHROUGH */ default: omap_revision = AM35XX_REV_ES1_1; cpu_rev = "1.1"; @@ -435,7 +433,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "1.1"; break; case 2: - /* FALLTHROUGH */ default: omap_revision = OMAP3630_REV_ES1_2; cpu_rev = "1.2"; @@ -456,7 +453,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "2.0"; break; case 3: - /* FALLTHROUGH */ default: omap_revision = TI8168_REV_ES2_1; cpu_rev = "2.1"; @@ -473,7 +469,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "2.0"; break; case 2: - /* FALLTHROUGH */ default: omap_revision = AM335X_REV_ES2_1; cpu_rev = "2.1"; @@ -491,7 +486,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "1.1"; break; case 2: - /* FALLTHROUGH */ default: omap_revision = AM437X_REV_ES1_2; cpu_rev = "1.2"; @@ -502,7 +496,6 @@ void __init omap3xxx_check_revision(void) case 0xb968: switch (rev) { case 0: - /* FALLTHROUGH */ case 1: omap_revision = TI8148_REV_ES1_0; cpu_rev = "1.0"; @@ -512,7 +505,6 @@ void __init omap3xxx_check_revision(void) cpu_rev = "2.0"; break; case 3: - /* FALLTHROUGH */ default: omap_revision = TI8148_REV_ES2_1; cpu_rev = "2.1"; diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c index 54aff33e55e6efac5de04ce915967ee77c9c412b..bfa5e1b8dba7f86a63cbe26ef01161250d1c4b8a 100644 --- a/arch/arm/mach-omap2/omap-iommu.c +++ b/arch/arm/mach-omap2/omap-iommu.c @@ -74,7 +74,7 @@ static struct powerdomain *_get_pwrdm(struct device *dev) return pwrdm; clk = of_clk_get(dev->of_node->parent, 0); - if (!clk) { + if (IS_ERR(clk)) { dev_err(dev, "no fck found\n"); return NULL; } diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 6b4548f3b57f08e26939099189b589d931da70c4..fc7bb2ca16727dadf7eec4885f6afb96c16bb9cc 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -240,7 +240,7 @@ static int _omap_device_notifier_call(struct notifier_block *nb, if (pdev->dev.of_node) omap_device_build_from_dt(pdev); omap_auxdata_legacy_init(dev); - /* fall through */ + fallthrough; default: od = to_omap_device(pdev); if (od) diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 6df395fff971b08f5b24cc67dec547d710ab9845..f5dfddf492e210979564f29c0cfa80b9f2850f0a 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -298,11 +298,7 @@ static void omap3_pm_idle(void) if (omap_irq_pending()) return; - trace_cpu_idle_rcuidle(1, smp_processor_id()); - omap_sram_idle(); - - trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } #ifdef CONFIG_SUSPEND diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c index d13344b2ddcd4ef0ad6c8b8554733a0fc6063610..87cb47220e825938ae9d8c5bc011e4c70ccb4d96 100644 --- a/arch/arm/mach-orion5x/dns323-setup.c +++ b/arch/arm/mach-orion5x/dns323-setup.c @@ -624,7 +624,7 @@ static void __init dns323_init(void) dns323ab_leds[0].active_low = 1; gpio_request(DNS323_GPIO_LED_POWER1, "Power Led Enable"); gpio_direction_output(DNS323_GPIO_LED_POWER1, 0); - /* 
Fall through */ + fallthrough; case DNS323_REV_B1: i2c_register_board_info(0, dns323ab_i2c_devices, ARRAY_SIZE(dns323ab_i2c_devices)); diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c index ea2c84214bac18a18e03475e48a765156f72e6fa..d23970bd638d9bd0d4f93a507752d0ec54f9392e 100644 --- a/arch/arm/mach-rpc/riscpc.c +++ b/arch/arm/mach-rpc/riscpc.c @@ -46,7 +46,7 @@ static int __init parse_tag_acorn(const struct tag *tag) switch (tag->u.acorn.vram_pages) { case 512: vram_size += PAGE_SIZE * 256; - /* Fall through - ??? */ + fallthrough; /* ??? */ case 256: vram_size += PAGE_SIZE * 256; default: diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c index 76a65df42d10f2968de89c8e935215ab1da11977..d5c805adf7a82b938bebd8941eae974cf6bcdbe3 100644 --- a/arch/arm/mach-tegra/reset.c +++ b/arch/arm/mach-tegra/reset.c @@ -70,7 +70,7 @@ static void __init tegra_cpu_reset_handler_enable(void) switch (err) { case -ENOSYS: tegra_cpu_reset_handler_set(reset_address); - /* fall through */ + fallthrough; case 0: is_enabled = true; break; diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index f4bfc1cac91aa2998750fedde133745c6b02d30d..ea81e89e77400e6318698085f2a82e10d5f1ccb5 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -694,7 +694,7 @@ thumb2arm(u16 tinstr) return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] | (tinstr & 255); /* register_list */ } - /* Else, fall through - for illegal instruction case */ + fallthrough; /* for illegal instruction case */ default: return BAD_INSTR; @@ -750,7 +750,7 @@ do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs, case 0xe8e0: case 0xe9e0: poffset->un = (tinst2 & 0xff) << 2; - /* Fall through */ + fallthrough; case 0xe940: case 0xe9c0: diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c index c0fbfca5da8bfe58189eee1bb219290af8c89443..114c05ab4dd919a26064eda1f395f1c8f4119ec1 100644 --- a/arch/arm/mm/proc-v7-bugs.c +++ b/arch/arm/mm/proc-v7-bugs.c @@ -71,7 +71,7 @@ static void cpu_v7_spectre_init(void) /* Other ARM CPUs require no workaround */ if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) break; - /* fallthrough */ + fallthrough; /* Cortex A57/A72 require firmware workaround */ case ARM_CPU_PART_CORTEX_A57: case ARM_CPU_PART_CORTEX_A72: { diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index b2e9e822426fd02b742fb836d624707bbc6e9f7e..1eb59003bdecdea32408afb842c800d94ca0d072 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -309,14 +309,14 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) * not supported by current hardware on OMAP1 * w |= (0x03 << 7); */ - /* fall through */ + fallthrough; case OMAP_DMA_DATA_BURST_16: if (dma_omap2plus()) { burst = 0x3; break; } /* OMAP1 doesn't support burst 16 */ - /* fall through */ + fallthrough; default: BUG(); } @@ -393,7 +393,7 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) break; } /* OMAP1 doesn't support burst 16 */ - /* fall through */ + fallthrough; default: printk(KERN_ERR "Invalid DMA burst mode\n"); BUG(); } diff --git a/arch/arm/probes/decode.c b/arch/arm/probes/decode.c index fe81a9c21f2d2dc2daeca5732a5d5533f13d3854..c84053a81358f3ff80776cd52a48095c11e8c611 100644 --- a/arch/arm/probes/decode.c +++ b/arch/arm/probes/decode.c @@ -307,7 +307,7 @@ static bool __kprobes decode_regs(probes_opcode_t *pinsn, u32 regs, bool modify) case REG_TYPE_NOPCWB: if (!is_writeback(insn)) break; /* No writeback, so any register is OK */ - /* fall
through... */ + fallthrough; case REG_TYPE_NOPC: case REG_TYPE_NOPCX: /* Reject PC (R15) */ diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index 90b5bc723c83f0163831d003920d1a7bb80f7c47..feefa2055ebaa5a95af1b73d91ee3bdc372a1add 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -280,7 +280,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs) /* A nested probe was hit in FIQ, it is a BUG */ pr_warn("Unrecoverable kprobe detected.\n"); dump_kprobe(p); - /* fall through */ + fallthrough; default: /* impossible cases */ BUG(); diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 55bc8546d9c749dbb77cc94793674eaea98111cc..130569f90c54ae64ef479adc20c0c7b76f44a3b8 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -82,8 +82,8 @@ endif # compiler to generate them and consequently to break the single image contract # we pass it only to the assembler. This option is utilized only in case of non # integrated assemblers. -ifneq ($(CONFIG_AS_HAS_ARMV8_4), y) -branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a +ifeq ($(CONFIG_AS_HAS_PAC), y) +asm-arch := armv8.3-a endif endif @@ -91,7 +91,12 @@ KBUILD_CFLAGS += $(branch-prot-flags-y) ifeq ($(CONFIG_AS_HAS_ARMV8_4), y) # make sure to pass the newest target architecture to -march. -KBUILD_CFLAGS += -Wa,-march=armv8.4-a +asm-arch := armv8.4-a +endif + +ifdef asm-arch +KBUILD_CFLAGS += -Wa,-march=$(asm-arch) \ + -DARM64_ASM_ARCH='"$(asm-arch)"' endif ifeq ($(CONFIG_SHADOW_CALL_STACK), y) @@ -165,6 +170,8 @@ zinstall install: PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@ + $(if $(CONFIG_COMPAT_VDSO), \ + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@) # We use MRPROPER_FILES and CLEAN_FILES now archclean: diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index 15f7b0ed38369a0821ee4b40b2dcbaf1cf5d0a20..39802066232e1e942cd0e20b11f633332d32c6e0 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -745,7 +745,7 @@ nand: nand@66460000 { }; qspi: spi@66470200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi"; + compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi"; reg = <0x66470200 0x184>, <0x66470000 0x124>, <0x67017408 0x004>, diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile index a39f0a1723e026b752bd8d40e6c3e8cb345a2475..903c0eb61290d320d719a83c5991e747a5afae61 100644 --- a/arch/arm64/boot/dts/freescale/Makefile +++ b/arch/arm64/boot/dts/freescale/Makefile @@ -28,6 +28,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-honeycomb.dtb dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb +dtb-$(CONFIG_ARCH_MXC) += imx8mm-beacon-kit.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mn-evk.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mn-ddr4-evk.dtb diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi index 9de2aa1c573c7f41b34416d02bc767593b47e976..a5154f13a18e924386e370bd8efd015b7f9a0cc2 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi @@ -702,7 +702,7 @@ sdma1: dma-controller@30bd0000 { reg = <0x30bd0000 0x10000>; interrupts = ; clocks = <&clk IMX8MP_CLK_SDMA1_ROOT>, - <&clk IMX8MP_CLK_SDMA1_ROOT>; + <&clk IMX8MP_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; 
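/* "ahb" is the bus clock and must differ from the "ipg" module clock, hence IMX8MP_CLK_AHB rather than SDMA1_ROOT twice */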
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index f70435cf9ad57c123339ebcc63b9776ed0f8af03..561fa792fe5a98508db38a739090551468655a5c 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -423,7 +423,7 @@ gpio5: gpio@30240000 { tmu: tmu@30260000 { compatible = "fsl,imx8mq-tmu"; reg = <0x30260000 0x10000>; - interrupt = ; + interrupts = ; clocks = <&clk IMX8MQ_CLK_TMU_ROOT>; little-endian; fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>; diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi index 1a39e0ef776bb300775d022493653b3d5cbec074..5b9ec032ce8d8dfb6c3c125b50e28fbd62cef31b 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi @@ -686,6 +686,8 @@ mmc0: mmc@11230000 { clocks = <&pericfg CLK_PERI_MSDC30_0_PD>, <&topckgen CLK_TOP_MSDC50_0_SEL>; clock-names = "source", "hclk"; + resets = <&pericfg MT7622_PERI_MSDC0_SW_RST>; + reset-names = "hrst"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi index 34d249d85da7653a9252ac1fff3c653aea9da2db..8eb61dd9921ebf007c3af265e6bc6a2d09c84ca3 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi @@ -337,8 +337,9 @@ sdmmc1: mmc@3400000 { compatible = "nvidia,tegra186-sdhci"; reg = <0x0 0x03400000 0x0 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA186_CLK_SDMMC1>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA186_CLK_SDMMC1>, + <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA186_RESET_SDMMC1>; reset-names = "sdhci"; interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRA &emc>, @@ -366,8 +367,9 @@ sdmmc2: mmc@3420000 { compatible = "nvidia,tegra186-sdhci"; reg = <0x0 0x03420000 0x0 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA186_CLK_SDMMC2>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA186_CLK_SDMMC2>, + <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA186_RESET_SDMMC2>; reset-names = "sdhci"; interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRAA &emc>, @@ -390,8 +392,9 @@ sdmmc3: mmc@3440000 { compatible = "nvidia,tegra186-sdhci"; reg = <0x0 0x03440000 0x0 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA186_CLK_SDMMC3>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA186_CLK_SDMMC3>, + <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA186_RESET_SDMMC3>; reset-names = "sdhci"; interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCR &emc>, @@ -416,8 +419,9 @@ sdmmc4: mmc@3460000 { compatible = "nvidia,tegra186-sdhci"; reg = <0x0 0x03460000 0x0 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA186_CLK_SDMMC4>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA186_CLK_SDMMC4>, + <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; assigned-clocks = <&bpmp TEGRA186_CLK_SDMMC4>, <&bpmp TEGRA186_CLK_PLLC4_VCO>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLC4_VCO>; diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi index 48160f48003aa65dc9ff0b3a2108769916d4023f..ca5cb6aef5ee43606bef3eb856a5760787ab851e 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi @@ -460,8 +460,9 @@ sdmmc1: mmc@3400000 { compatible = "nvidia,tegra194-sdhci"; reg = <0x03400000 
0x10000>; interrupts = ; - clocks = <&bpmp TEGRA194_CLK_SDMMC1>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA194_CLK_SDMMC1>, + <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA194_RESET_SDMMC1>; reset-names = "sdhci"; interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCRA &emc>, @@ -485,8 +486,9 @@ sdmmc3: mmc@3440000 { compatible = "nvidia,tegra194-sdhci"; reg = <0x03440000 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA194_CLK_SDMMC3>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA194_CLK_SDMMC3>, + <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; resets = <&bpmp TEGRA194_RESET_SDMMC3>; reset-names = "sdhci"; interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCR &emc>, @@ -511,8 +513,9 @@ sdmmc4: mmc@3460000 { compatible = "nvidia,tegra194-sdhci"; reg = <0x03460000 0x10000>; interrupts = ; - clocks = <&bpmp TEGRA194_CLK_SDMMC4>; - clock-names = "sdhci"; + clocks = <&bpmp TEGRA194_CLK_SDMMC4>, + <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>; + clock-names = "sdhci", "tmclk"; assigned-clocks = <&bpmp TEGRA194_CLK_SDMMC4>, <&bpmp TEGRA194_CLK_PLLC4>; assigned-clock-parents = diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi index 829f786af1338d80aab566d6db830f296c320551..8cca2166a446ab7e384ae8236bb4ab95863301b4 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi @@ -1194,8 +1194,9 @@ mmc@700b0000 { compatible = "nvidia,tegra210-sdhci"; reg = <0x0 0x700b0000 0x0 0x200>; interrupts = ; - clocks = <&tegra_car TEGRA210_CLK_SDMMC1>; - clock-names = "sdhci"; + clocks = <&tegra_car TEGRA210_CLK_SDMMC1>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; resets = <&tegra_car 14>; reset-names = "sdhci"; pinctrl-names = "sdmmc-3v3", "sdmmc-1v8", @@ -1222,8 +1223,9 @@ mmc@700b0200 { compatible = "nvidia,tegra210-sdhci"; reg = <0x0 0x700b0200 0x0 0x200>; interrupts = ; - clocks = <&tegra_car TEGRA210_CLK_SDMMC2>; - clock-names = "sdhci"; + clocks = <&tegra_car TEGRA210_CLK_SDMMC2>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; resets = <&tegra_car 9>; reset-names = "sdhci"; pinctrl-names = "sdmmc-1v8-drv"; @@ -1239,8 +1241,9 @@ mmc@700b0400 { compatible = "nvidia,tegra210-sdhci"; reg = <0x0 0x700b0400 0x0 0x200>; interrupts = ; - clocks = <&tegra_car TEGRA210_CLK_SDMMC3>; - clock-names = "sdhci"; + clocks = <&tegra_car TEGRA210_CLK_SDMMC3>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; resets = <&tegra_car 69>; reset-names = "sdhci"; pinctrl-names = "sdmmc-3v3", "sdmmc-1v8", @@ -1262,8 +1265,9 @@ mmc@700b0600 { compatible = "nvidia,tegra210-sdhci"; reg = <0x0 0x700b0600 0x0 0x200>; interrupts = ; - clocks = <&tegra_car TEGRA210_CLK_SDMMC4>; - clock-names = "sdhci"; + clocks = <&tegra_car TEGRA210_CLK_SDMMC4>, + <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>; + clock-names = "sdhci", "tmclk"; resets = <&tegra_car 15>; reset-names = "sdhci"; pinctrl-names = "sdmmc-3v3-drv", "sdmmc-1v8-drv"; diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index 9edfae5944f727a700a336e3b928fb91280e131a..24ef18fe77df07fb24a05ac5297a546ca1f374e7 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -417,10 +417,10 @@ intr_main_gpio: interrupt-controller0 { ti,intr-trigger-type = <1>; interrupt-controller; interrupt-parent = <&gic500>; - #interrupt-cells = <2>; + #interrupt-cells = <1>; ti,sci = 
<&dmsc>; - ti,sci-dst-id = <56>; - ti,sci-rm-range-girq = <0x1>; + ti,sci-dev-id = <100>; + ti,interrupt-ranges = <0 392 32>; }; main_navss { @@ -438,10 +438,11 @@ intr_main_navss: interrupt-controller1 { ti,intr-trigger-type = <4>; interrupt-controller; interrupt-parent = <&gic500>; - #interrupt-cells = <2>; + #interrupt-cells = <1>; ti,sci = <&dmsc>; - ti,sci-dst-id = <56>; - ti,sci-rm-range-girq = <0x0>, <0x2>; + ti,sci-dev-id = <182>; + ti,interrupt-ranges = <0 64 64>, + <64 448 64>; }; inta_main_udmass: interrupt-controller@33d00000 { @@ -452,8 +453,7 @@ inta_main_udmass: interrupt-controller@33d00000 { msi-controller; ti,sci = <&dmsc>; ti,sci-dev-id = <179>; - ti,sci-rm-range-vint = <0x0>; - ti,sci-rm-range-global-event = <0x1>; + ti,interrupt-ranges = <0 0 256>; }; secure_proxy_main: mailbox@32c00000 { @@ -589,7 +589,7 @@ ringacc: ringacc@3c000000 { <0x0 0x33000000 0x0 0x40000>; reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target"; ti,num-rings = <818>; - ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */ + ti,sci-rm-range-gp-rings = <0x1>; /* GP ring range */ ti,dma-ring-reset-quirk; ti,sci = <&dmsc>; ti,sci-dev-id = <187>; @@ -609,11 +609,11 @@ main_udmap: dma-controller@31150000 { ti,sci-dev-id = <188>; ti,ringacc = <&ringacc>; - ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */ - <0x2>; /* TX_CHAN */ - ti,sci-rm-range-rchan = <0x4>, /* RX_HCHAN */ - <0x5>; /* RX_CHAN */ - ti,sci-rm-range-rflow = <0x6>; /* GP RFLOW */ + ti,sci-rm-range-tchan = <0xf>, /* TX_HCHAN */ + <0xd>; /* TX_CHAN */ + ti,sci-rm-range-rchan = <0xb>, /* RX_HCHAN */ + <0xa>; /* RX_CHAN */ + ti,sci-rm-range-rflow = <0x0>; /* GP RFLOW */ }; cpts@310d0000 { @@ -622,7 +622,7 @@ cpts@310d0000 { reg-names = "cpts"; clocks = <&main_cpts_mux>; clock-names = "cpts"; - interrupts-extended = <&intr_main_navss 163 0>; + interrupts-extended = <&intr_main_navss 391>; interrupt-names = "cpts"; ti,cpts-periodic-outputs = <6>; ti,cpts-ext-ts-inputs = <8>; @@ -645,8 +645,7 @@ main_gpio0: main_gpio0@600000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&intr_main_gpio>; - interrupts = <57 256>, <57 257>, <57 258>, <57 259>, <57 260>, - <57 261>; + interrupts = <192>, <193>, <194>, <195>, <196>, <197>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = <96>; @@ -661,8 +660,7 @@ main_gpio1: main_gpio1@601000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&intr_main_gpio>; - interrupts = <58 256>, <58 257>, <58 258>, <58 259>, <58 260>, - <58 261>; + interrupts = <200>, <201>, <202>, <203>, <204>, <205>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = <90>; diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi index 8c1abcfe08605c18c339709c71c26431ace4d1e3..51ca4b4d4c21438d43de8b1bbc2a80a92b4d8df7 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi @@ -134,7 +134,7 @@ mcu_ringacc: ringacc@2b800000 { <0x0 0x2a500000 0x0 0x40000>; reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target"; ti,num-rings = <286>; - ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */ + ti,sci-rm-range-gp-rings = <0x1>; /* GP ring range */ ti,dma-ring-reset-quirk; ti,sci = <&dmsc>; ti,sci-dev-id = <195>; @@ -154,11 +154,11 @@ mcu_udmap: dma-controller@285c0000 { ti,sci-dev-id = <194>; ti,ringacc = <&mcu_ringacc>; - ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */ - <0x2>; /* TX_CHAN */ - ti,sci-rm-range-rchan = <0x3>, /* RX_HCHAN */ - <0x4>; /* RX_CHAN */ - ti,sci-rm-range-rflow = <0x5>; /* GP RFLOW */ + ti,sci-rm-range-tchan = <0xf>, 
/* TX_HCHAN */ + <0xd>; /* TX_CHAN */ + ti,sci-rm-range-rchan = <0xb>, /* RX_HCHAN */ + <0xa>; /* RX_CHAN */ + ti,sci-rm-range-rflow = <0x0>; /* GP RFLOW */ }; }; diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi index 5f55b9e82cf1e467de6f92fdfc89bcb882d19e4a..a1ffe88d966414811ed9a8f8f1646c550c5d2a85 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi @@ -74,10 +74,10 @@ intr_wkup_gpio: interrupt-controller2 { ti,intr-trigger-type = <1>; interrupt-controller; interrupt-parent = <&gic500>; - #interrupt-cells = <2>; + #interrupt-cells = <1>; ti,sci = <&dmsc>; - ti,sci-dst-id = <56>; - ti,sci-rm-range-girq = <0x4>; + ti,sci-dev-id = <156>; + ti,interrupt-ranges = <0 712 16>; }; wkup_gpio0: wkup_gpio0@42110000 { @@ -86,7 +86,7 @@ wkup_gpio0: wkup_gpio0@42110000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&intr_wkup_gpio>; - interrupts = <59 128>, <59 129>, <59 130>, <59 131>; + interrupts = <60>, <61>, <62>, <63>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = <56>; diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts index 611e6620701078e0e7a0fc9b8374834ba67d7029..b8a8a0fcb8af1a6c04e26aca57e16053af5087d4 100644 --- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts +++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts @@ -384,7 +384,7 @@ &pcie1_ep { }; &mailbox0_cluster0 { - interrupts = <164 0>; + interrupts = <436>; mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 { ti,mbox-tx = <1 0 0>; @@ -393,7 +393,7 @@ mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 { }; &mailbox0_cluster1 { - interrupts = <165 0>; + interrupts = <432>; mbox_mcu_r5fss0_core1: mbox-mcu-r5fss0-core1 { ti,mbox-tx = <1 0 0>; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts index 8bc1e6ecc50ebcf083829bf3f8d509abfb92ca0b..e8fc01d97adadaddb8d253d33e3358af06ad55fe 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts +++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts @@ -287,7 +287,7 @@ &wkup_gpio1 { }; &mailbox0_cluster0 { - interrupts = <214 0>; + interrupts = <436>; mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 { ti,mbox-rx = <0 0 0>; @@ -301,7 +301,7 @@ mbox_mcu_r5fss0_core1: mbox-mcu-r5fss0-core1 { }; &mailbox0_cluster1 { - interrupts = <215 0>; + interrupts = <432>; mbox_main_r5fss0_core0: mbox-main-r5fss0-core0 { ti,mbox-rx = <0 0 0>; @@ -315,7 +315,7 @@ mbox_main_r5fss0_core1: mbox-main-r5fss0-core1 { }; &mailbox0_cluster2 { - interrupts = <216 0>; + interrupts = <428>; mbox_main_r5fss1_core0: mbox-main-r5fss1-core0 { ti,mbox-rx = <0 0 0>; @@ -329,7 +329,7 @@ mbox_main_r5fss1_core1: mbox-main-r5fss1-core1 { }; &mailbox0_cluster3 { - interrupts = <217 0>; + interrupts = <424>; mbox_c66_0: mbox-c66-0 { ti,mbox-rx = <0 0 0>; @@ -343,7 +343,7 @@ mbox_c66_1: mbox-c66-1 { }; &mailbox0_cluster4 { - interrupts = <218 0>; + interrupts = <420>; mbox_c71_0: mbox-c71-0 { ti,mbox-rx = <0 0 0>; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi index d14060207f0097f31dd106bc945720d4eaa4e719..12ceea9b3c9aef8da1ea4610ba3cd7083d9fd722 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi @@ -80,10 +80,10 @@ main_gpio_intr: interrupt-controller0 { ti,intr-trigger-type = <1>; interrupt-controller; interrupt-parent = <&gic500>; - #interrupt-cells = <2>; + #interrupt-cells = <1>; 
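+ /* one cell: the interrupt router input number; ti,interrupt-ranges maps router inputs to parent GIC interrupts */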
ti,sci = <&dmsc>; - ti,sci-dst-id = <14>; - ti,sci-rm-range-girq = <0x1>; + ti,sci-dev-id = <131>; + ti,interrupt-ranges = <8 392 56>; }; main_navss { @@ -101,10 +101,12 @@ main_navss_intr: interrupt-controller1 { ti,intr-trigger-type = <4>; interrupt-controller; interrupt-parent = <&gic500>; - #interrupt-cells = <2>; + #interrupt-cells = <1>; ti,sci = <&dmsc>; - ti,sci-dst-id = <14>; - ti,sci-rm-range-girq = <0>, <2>; + ti,sci-dev-id = <213>; + ti,interrupt-ranges = <0 64 64>, + <64 448 64>, + <128 672 64>; }; main_udmass_inta: interrupt-controller@33d00000 { @@ -115,8 +117,7 @@ main_udmass_inta: interrupt-controller@33d00000 { msi-controller; ti,sci = <&dmsc>; ti,sci-dev-id = <209>; - ti,sci-rm-range-vint = <0xa>; - ti,sci-rm-range-global-event = <0xd>; + ti,interrupt-ranges = <0 0 256>; }; secure_proxy_main: mailbox@32c00000 { @@ -296,7 +297,7 @@ cpts@310d0000 { reg-names = "cpts"; clocks = <&k3_clks 201 1>; clock-names = "cpts"; - interrupts-extended = <&main_navss_intr 201 0>; + interrupts-extended = <&main_navss_intr 391>; interrupt-names = "cpts"; ti,cpts-periodic-outputs = <6>; ti,cpts-ext-ts-inputs = <8>; @@ -688,8 +689,8 @@ main_gpio0: gpio@600000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&main_gpio_intr>; - interrupts = <105 0>, <105 1>, <105 2>, <105 3>, - <105 4>, <105 5>, <105 6>, <105 7>; + interrupts = <256>, <257>, <258>, <259>, + <260>, <261>, <262>, <263>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = <128>; @@ -705,7 +706,7 @@ main_gpio1: gpio@601000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&main_gpio_intr>; - interrupts = <106 0>, <106 1>, <106 2>; + interrupts = <288>, <289>, <290>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = <36>; @@ -721,8 +722,8 @@ main_gpio2: gpio@610000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&main_gpio_intr>; - interrupts = <107 0>, <107 1>, <107 2>, <107 3>, - <107 4>, <107 5>, <107 6>, <107 7>; + interrupts = <264>, <265>, <266>, <267>, + <268>, <269>, <270>, <271>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = <128>; @@ -738,7 +739,7 @@ main_gpio3: gpio@611000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&main_gpio_intr>; - interrupts = <108 0>, <108 1>, <108 2>; + interrupts = <292>, <293>, <294>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = <36>; @@ -754,8 +755,8 @@ main_gpio4: gpio@620000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&main_gpio_intr>; - interrupts = <109 0>, <109 1>, <109 2>, <109 3>, - <109 4>, <109 5>, <109 6>, <109 7>; + interrupts = <272>, <273>, <274>, <275>, + <276>, <277>, <278>, <279>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = <128>; @@ -771,7 +772,7 @@ main_gpio5: gpio@621000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&main_gpio_intr>; - interrupts = <110 0>, <110 1>, <110 2>; + interrupts = <296>, <297>, <298>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = <36>; @@ -787,8 +788,8 @@ main_gpio6: gpio@630000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&main_gpio_intr>; - interrupts = <111 0>, <111 1>, <111 2>, <111 3>, - <111 4>, <111 5>, <111 6>, <111 7>; + interrupts = <280>, <281>, <282>, <283>, + <284>, <285>, <286>, <287>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = <128>; @@ -804,7 +805,7 @@ main_gpio7: gpio@631000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&main_gpio_intr>; - interrupts = <112 0>, <112 1>, <112 2>; + interrupts = <300>, <301>, <302>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = 
<36>; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi index 30a735bcd0c894f37ef53ba7ffee9f1d8bb90c41..c4a48e8d420a2b42e280f22d72373260575db7ee 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi @@ -101,10 +101,10 @@ wkup_gpio_intr: interrupt-controller2 { ti,intr-trigger-type = <1>; interrupt-controller; interrupt-parent = <&gic500>; - #interrupt-cells = <2>; + #interrupt-cells = <1>; ti,sci = <&dmsc>; - ti,sci-dst-id = <14>; - ti,sci-rm-range-girq = <0x5>; + ti,sci-dev-id = <137>; + ti,interrupt-ranges = <16 960 16>; }; wkup_gpio0: gpio@42110000 { @@ -113,8 +113,7 @@ wkup_gpio0: gpio@42110000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&wkup_gpio_intr>; - interrupts = <113 0>, <113 1>, <113 2>, - <113 3>, <113 4>, <113 5>; + interrupts = <103>, <104>, <105>, <106>, <107>, <108>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = <84>; @@ -130,8 +129,7 @@ wkup_gpio1: gpio@42100000 { gpio-controller; #gpio-cells = <2>; interrupt-parent = <&wkup_gpio_intr>; - interrupts = <114 0>, <114 1>, <114 2>, - <114 3>, <114 4>, <114 5>; + interrupts = <112>, <113>, <114>, <115>, <116>, <117>; interrupt-controller; #interrupt-cells = <2>; ti,ngpio = <84>; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi index 9174ddc76bdc3a9ce651d0f25f844fc1afe2d31c..3ec99f13c259e34fc6180d430f3cd52abbcb772d 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi @@ -13,6 +13,7 @@ */ #include +#include / { compatible = "xlnx,zynqmp"; @@ -558,6 +559,15 @@ pcie_intc: legacy-interrupt-controller { }; }; + psgtr: phy@fd400000 { + compatible = "xlnx,zynqmp-psgtr-v1.1"; + status = "disabled"; + reg = <0x0 0xfd400000 0x0 0x40000>, + <0x0 0xfd3d0000 0x0 0x1000>; + reg-names = "serdes", "siou"; + #phy-cells = <4>; + }; + rtc: rtc@ffa60000 { compatible = "xlnx,zynqmp-rtc"; status = "disabled"; @@ -601,7 +611,7 @@ sdhci1: mmc@ff170000 { power-domains = <&zynqmp_firmware PD_SD_1>; }; - smmu: smmu@fd800000 { + smmu: iommu@fd800000 { compatible = "arm,mmu-500"; reg = <0x0 0xfd800000 0x0 0x20000>; status = "disabled"; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index e0f33826819f1e0f0e038dec3dc13da7c5034228..6d04b9577b0bddf95a219e6b00e47c0ae590d894 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -724,6 +724,17 @@ CONFIG_USB_GADGET=y CONFIG_USB_RENESAS_USBHS_UDC=m CONFIG_USB_RENESAS_USB3=m CONFIG_USB_TEGRA_XUDC=m +CONFIG_USB_CONFIGFS=m +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_ACM=y +CONFIG_USB_CONFIGFS_OBEX=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_ECM=y +CONFIG_USB_CONFIGFS_ECM_SUBSET=y +CONFIG_USB_CONFIGFS_RNDIS=y +CONFIG_USB_CONFIGFS_EEM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m CONFIG_TYPEC_FUSB302=m @@ -914,6 +925,7 @@ CONFIG_ARCH_TEGRA_194_SOC=y CONFIG_ARCH_K3_AM6_SOC=y CONFIG_ARCH_K3_J721E_SOC=y CONFIG_TI_SCI_PM_DOMAINS=y +CONFIG_EXTCON_PTN5150=m CONFIG_EXTCON_USB_GPIO=y CONFIG_EXTCON_USBC_CROS_EC=y CONFIG_IIO=y diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h index 51a7ce87cdfe07c55409d766118dfedda56d9535..6fb2e6bcc392fe8b0f5e7ebee1a1893fdc4c548d 100644 --- a/arch/arm64/include/asm/compiler.h +++ b/arch/arm64/include/asm/compiler.h @@ -2,6 +2,12 @@ #ifndef __ASM_COMPILER_H #define __ASM_COMPILER_H +#ifdef ARM64_ASM_ARCH +#define 
ARM64_ASM_PREAMBLE ".arch " ARM64_ASM_ARCH "\n" +#else +#define ARM64_ASM_PREAMBLE +#endif + /* * The EL0/EL1 pointer bits used by a pointer authentication code. * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply. diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index aa4b6521ef14412796c6ddd1367d33ff9652c278..ff328e5bbb75721e93d1df93bf43cf55aae6f492 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -95,6 +95,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) return res; } +static inline int arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + static inline unsigned long arch_local_irq_save(void) { unsigned long flags; diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 51c1d9918999205d9034781f5ea8a68a335201b2..1da8e3dc4455511de6b14f37e23a9533eaa144fe 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -71,11 +71,12 @@ * IMO: Override CPSR.I and enable signaling with VI * FMO: Override CPSR.F and enable signaling with VF * SWIO: Turn set/way invalidates into set/way clean+invalidate + * PTW: Take a stage2 fault if a stage1 walk steps in device memory */ #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ HCR_BSU_IS | HCR_FB | HCR_TAC | \ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ - HCR_FMO | HCR_IMO) + HCR_FMO | HCR_IMO | HCR_PTW) #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index fb1a922b31ba2167d0d51467a17312f25037f436..6f98fbd0ac8162cc1b165aea059b68bbb20af4ae 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -169,6 +169,34 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ]; *__hyp_this_cpu_ptr(sym); \ }) +#define __KVM_EXTABLE(from, to) \ + " .pushsection __kvm_ex_table, \"a\"\n" \ + " .align 3\n" \ + " .long (" #from " - .), (" #to " - .)\n" \ + " .popsection\n" + + +#define __kvm_at(at_op, addr) \ +( { \ + int __kvm_at_err = 0; \ + u64 spsr, elr; \ + asm volatile( \ + " mrs %1, spsr_el2\n" \ + " mrs %2, elr_el2\n" \ + "1: at "at_op", %3\n" \ + " isb\n" \ + " b 9f\n" \ + "2: msr spsr_el2, %1\n" \ + " msr elr_el2, %2\n" \ + " mov %w0, %4\n" \ + "9:\n" \ + __KVM_EXTABLE(1b, 2b) \ + : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \ + : "r" (addr), "i" (-EFAULT)); \ + __kvm_at_err; \ +} ) + + #else /* __ASSEMBLY__ */ .macro hyp_adr_this_cpu reg, sym, tmp @@ -193,6 +221,21 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ]; ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] .endm +/* + * KVM extable for unexpected exceptions. + * In the same format as _asm_extable, but output to a different section so that + * it can be mapped to EL2. The KVM version is not sorted. The caller must + * ensure: + * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented + * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup. + */ +.macro _kvm_extable, from, to + .pushsection __kvm_ex_table, "a" + .align 3 + .long (\from - .), (\to - .)
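+ // two 32bit offsets relative to the entry itself, the same layout as struct exception_table_entry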
+ .popsection +.endm + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 65568b23868a1f6fea3936d267d805a15a02575c..905c2b87e05acc8fb778b11a2220cccaa90041d4 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -368,7 +368,6 @@ struct kvm_vcpu_arch { /* Guest PV state */ struct { - u64 steal; u64 last_steal; gpa_t base; } steal; @@ -473,7 +472,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, unsigned flags); int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); @@ -544,6 +543,7 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu); gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu); void kvm_update_stolen_time(struct kvm_vcpu *vcpu); +bool kvm_arm_pvtime_supported(void); int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu, diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index d493174415dbf55a4ade7d13b7379988e990829d..cc3f5a33ff9c543c2911a7c33d0503b7d981a429 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -28,14 +28,16 @@ * not. The macro handles invoking the asm with or without the * register argument as appropriate. */ -#define __TLBI_0(op, arg) asm ("tlbi " #op "\n" \ +#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE \ + "tlbi " #op "\n" \ ALTERNATIVE("nop\n nop", \ "dsb ish\n tlbi " #op, \ ARM64_WORKAROUND_REPEAT_TLBI, \ CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \ : : ) -#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \ +#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE \ + "tlbi " #op ", %0\n" \ ALTERNATIVE("nop\n nop", \ "dsb ish\n tlbi " #op ", %0", \ ARM64_WORKAROUND_REPEAT_TLBI, \ diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index 4559664011026a5920788b192cc832c921c2a922..a85174d0547370aa7944c4031ce1861205e54eec 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -322,7 +322,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) */ if (memblock_is_map_memory(phys)) return (void __iomem *)__phys_to_virt(phys); - /* fall through */ + fallthrough; default: if (region->attribute & EFI_MEMORY_WB) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 6bd1d3ad037ad492634416f16d4022015acab125..c332d49780dc96c6afe8cbe7b88f7cbef3d727b9 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -910,6 +910,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .desc = "ARM erratum 1418040", .capability = ARM64_WORKAROUND_1418040, ERRATA_MIDR_RANGE_LIST(erratum_1418040_list), + .type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU | + ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU), }, #endif #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index a389b999482e739db360cb75da86721df3c49270..6424584be01e6dd0a6d5e846d8e205dad6a6f1c0 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -686,7 +686,7 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, case FTR_HIGHER_OR_ZERO_SAFE: if (!cur || !new)
break; - /* Fallthrough */ + fallthrough; case FTR_HIGHER_SAFE: ret = new > cur ? new : cur; break; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 393c6fb1f1cb438c2cf0cb09ba922cf1e2250e59..d0076c2159e6676daf00d12b03b72ea0b1f263be 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -327,7 +327,6 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) set_bit(ICACHEF_VPIPT, &__icache_flags); break; default: - /* Fallthrough */ case ICACHE_POLICY_VIPT: /* Assume aliasing */ set_bit(ICACHEF_ALIASING, &__icache_flags); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 2646178c832944e00005f31a4c00467c4a953639..55af8b504b65abb2164f350e7019db108785d482 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -170,19 +170,6 @@ alternative_cb_end stp x28, x29, [sp, #16 * 14] .if \el == 0 - .if \regsize == 32 - /* - * If we're returning from a 32-bit task on a system affected by - * 1418040 then re-enable userspace access to the virtual counter. - */ -#ifdef CONFIG_ARM64_ERRATUM_1418040 -alternative_if ARM64_WORKAROUND_1418040 - mrs x0, cntkctl_el1 - orr x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN - msr cntkctl_el1, x0 -alternative_else_nop_endif -#endif - .endif clear_gp_regs mrs x21, sp_el0 ldr_this_cpu tsk, __entry_task, x20 @@ -294,14 +281,6 @@ alternative_else_nop_endif tst x22, #PSR_MODE32_BIT // native task? b.eq 3f -#ifdef CONFIG_ARM64_ERRATUM_1418040 -alternative_if ARM64_WORKAROUND_1418040 - mrs x0, cntkctl_el1 - bic x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN - msr cntkctl_el1, x0 -alternative_else_nop_endif -#endif - #ifdef CONFIG_ARM64_ERRATUM_845719 alternative_if ARM64_WORKAROUND_845719 #ifdef CONFIG_PID_IN_CONTEXTIDR diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index af234a1e08b7d29f66a2d9c2ff2fb7e0be74142b..712e97c03e54c287e4f6e9ee0e5d02ee3665a3cf 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -257,7 +257,7 @@ static int hw_breakpoint_control(struct perf_event *bp, * level. */ enable_debug_monitors(dbg_el); - /* Fall through */ + fallthrough; case HW_BREAKPOINT_RESTORE: /* Setup the address register. */ write_wb_reg(val_reg, i, info->address); @@ -541,13 +541,13 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; - /* Fallthrough */ + fallthrough; case 3: /* Allow single byte watchpoint. 
*/ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) break; - /* Fallthrough */ + fallthrough; default: return -EINVAL; } diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 9e897c500237dfe449abdeba47257a9de5eba3b9..8982b68289b79f2d8177ed9e8eb5d861e14df0ef 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -103,6 +103,10 @@ KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); KVM_NVHE_ALIAS(gic_pmr_sync); #endif +/* EL2 exception handling */ +KVM_NVHE_ALIAS(__start___kvm_ex_table); +KVM_NVHE_ALIAS(__stop___kvm_ex_table); + #endif /* CONFIG_KVM */ #endif /* __ARM64_KERNEL_IMAGE_VARS_H */ diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index 0ce3a28e33479284880fccabfe49224504d46930..2e224435c0249ac28c5eb63f8577250a3ff89f40 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -305,8 +305,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.core.plt_shndx = i; else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt")) mod->arch.init.plt_shndx = i; - else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && - !strcmp(secstrings + sechdrs[i].sh_name, + else if (!strcmp(secstrings + sechdrs[i].sh_name, ".text.ftrace_trampoline")) tramp = sechdrs + i; else if (sechdrs[i].sh_type == SHT_SYMTAB) diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 1cd1a4d0ed30e651b2f8dc854933991f1f69e15d..2a1ad95d9b2ccd8a32ff9ee0f7f1748983561d5c 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -315,21 +315,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, /* MOVW instruction relocations. */ case R_AARCH64_MOVW_UABS_G0_NC: overflow_check = false; - /* Fall through */ + fallthrough; case R_AARCH64_MOVW_UABS_G0: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, AARCH64_INSN_IMM_MOVKZ); break; case R_AARCH64_MOVW_UABS_G1_NC: overflow_check = false; - /* Fall through */ + fallthrough; case R_AARCH64_MOVW_UABS_G1: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, AARCH64_INSN_IMM_MOVKZ); break; case R_AARCH64_MOVW_UABS_G2_NC: overflow_check = false; - /* Fall through */ + fallthrough; case R_AARCH64_MOVW_UABS_G2: ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, AARCH64_INSN_IMM_MOVKZ); @@ -397,7 +397,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, break; case R_AARCH64_ADR_PREL_PG_HI21_NC: overflow_check = false; - /* Fall through */ + fallthrough; case R_AARCH64_ADR_PREL_PG_HI21: ovf = reloc_insn_adrp(me, sechdrs, loc, val); if (ovf && ovf != -ERANGE) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 84ec630b8ab5d851f8a6c0dc2a292c4925d2260b..f1804496b93508caad6174a635dbe91e98dfeedf 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -123,10 +123,8 @@ void arch_cpu_idle(void) * This should do all the clock switching and wait for interrupt * tricks */ - trace_cpu_idle_rcuidle(1, smp_processor_id()); cpu_do_idle(); local_irq_enable(); - trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } #ifdef CONFIG_HOTPLUG_CPU @@ -515,6 +513,39 @@ static void entry_task_switch(struct task_struct *next) __this_cpu_write(__entry_task, next); } +/* + * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT. 
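+ * (on affected cores, e.g. Cortex-A76, an AArch32 read of CNTVCT can + * return a corrupted value; trapped accesses are emulated by the kernel)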
+ * Assuming the virtual counter is enabled at the beginning of times: + * + * - disable access when switching from a 64bit task to a 32bit task + * - enable access when switching from a 32bit task to a 64bit task + */ +static void erratum_1418040_thread_switch(struct task_struct *prev, + struct task_struct *next) +{ + bool prev32, next32; + u64 val; + + if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) && + cpus_have_const_cap(ARM64_WORKAROUND_1418040))) + return; + + prev32 = is_compat_thread(task_thread_info(prev)); + next32 = is_compat_thread(task_thread_info(next)); + + if (prev32 == next32) + return; + + val = read_sysreg(cntkctl_el1); + + if (!next32) + val |= ARCH_TIMER_USR_VCT_ACCESS_EN; + else + val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN; + + write_sysreg(val, cntkctl_el1); +} + /* * Thread switching. */ @@ -530,6 +561,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, entry_task_switch(next); uao_thread_switch(next); ssbs_thread_switch(next); + erratum_1418040_thread_switch(prev, next); /* * Complete any pending TLB or cache maintenance on this CPU in case diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 77c4c9bad1b8527280428bde70c38f37f4410fc1..53acbeca4f574f9528feb6b64dd273496698fa03 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -280,7 +280,6 @@ u64 cpu_logical_map(int cpu) { return __cpu_logical_map[cpu]; } -EXPORT_SYMBOL_GPL(cpu_logical_map); void __init __no_sanitize_address setup_arch(char **cmdline_p) { diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 03957a1ae6c0cc27a16e6011f61a3a83c8bf6183..355ee9eed4dded61a666edd0944aa14a512128e9 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -151,7 +151,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) break; } pr_crit("CPU%u: may not have shut down cleanly\n", cpu); - /* Fall through */ + fallthrough; case CPU_STUCK_IN_KERNEL: pr_crit("CPU%u: is stuck in kernel\n", cpu); if (status & CPU_STUCK_REASON_52_BIT_VA) diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 5139a5f19256822f74fc7ed29b558c6f71a7e841..d6adb4677c25f801ba16ee25294c1e9ebd7d7678 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -208,7 +208,7 @@ quiet_cmd_vdsosym = VDSOSYM $@ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ # Install commands for the unstripped file -quiet_cmd_vdso_install = INSTALL $@ +quiet_cmd_vdso_install = INSTALL32 $@ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so vdso.so: $(obj)/vdso.so.dbg diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index ec8e894684a78eee91df855949da3a5eb309528b..7cba7623fcec75a7411c3388a3d5b516e46c49cc 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -20,6 +20,13 @@ ENTRY(_text) jiffies = jiffies_64; + +#define HYPERVISOR_EXTABLE \ + . 
= ALIGN(SZ_8); \ + __start___kvm_ex_table = .; \ + *(__kvm_ex_table) \ + __stop___kvm_ex_table = .; + #define HYPERVISOR_TEXT \ /* \ * Align to 4 KB so that \ @@ -35,6 +42,7 @@ jiffies = jiffies_64; __hyp_idmap_text_end = .; \ __hyp_text_start = .; \ *(.hyp.text) \ + HYPERVISOR_EXTABLE \ __hyp_text_end = .; #define IDMAP_TEXT \ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 691d21e4c717e19821dc843818496a69509493ad..b588c3b5c2f07b580be180ff727b3ff2db7d7c78 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -206,6 +206,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) */ r = 1; break; + case KVM_CAP_STEAL_TIME: + r = kvm_arm_pvtime_supported(); + break; default: r = kvm_arch_vm_ioctl_check_extension(kvm, ext); break; @@ -1640,6 +1643,10 @@ int kvm_arch_init(void *opaque) return -ENODEV; } + if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)) + kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \ + "Only trusted guests should be used on this system.\n"); + for_each_online_cpu(cpu) { smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1); if (ret < 0) { diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index fe6c7d79309d6724825ee7429b2cb0bc944beb4d..5d690d60ccad53bc6889365196537dcee26d2ab2 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -128,7 +128,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu) switch (ESR_ELx_EC(esr)) { case ESR_ELx_EC_WATCHPT_LOW: run->debug.arch.far = vcpu->arch.fault.far_el2; - /* fall through */ + fallthrough; case ESR_ELx_EC_SOFTSTP_LOW: case ESR_ELx_EC_BREAKPT_LOW: case ESR_ELx_EC_BKPT32: diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index ee32a77433898da3e24bafcbee5d8c64ea2ab4d4..76e7eaf4675eb4b7a148ab06c4db62c33dc2db96 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -196,20 +196,23 @@ alternative_endif // This is our single instruction exception window. A pending // SError is guaranteed to occur at the earliest when we unmask // it, and at the latest just after the ISB. - .global abort_guest_exit_start abort_guest_exit_start: isb - .global abort_guest_exit_end abort_guest_exit_end: msr daifset, #4 // Mask aborts + ret + + _kvm_extable abort_guest_exit_start, 9997f + _kvm_extable abort_guest_exit_end, 9997f +9997: + msr daifset, #4 // Mask aborts + mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) - // If the exception took place, restore the EL1 exception - // context so that we can report some information. - // Merge the exception code with the SError pending bit. - tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f + // restore the EL1 exception context so that we can report some + // information. Merge the exception code with the SError pending bit. msr elr_el2, x2 msr esr_el2, x3 msr spsr_el2, x4 diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 689fccbc9de72d84894f0383b469dfeaba8894b8..46b4dab933d0e7c415a9deff583565970bdba1c1 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -15,6 +15,30 @@ #include #include +.macro save_caller_saved_regs_vect + /* x0 and x1 were saved in the vector entry */ + stp x2, x3, [sp, #-16]! + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! 
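+ // x19-x28 are callee-saved, so the C handler preserves them; x29/x30 are pushed separately at each call site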
+.endm + +.macro restore_caller_saved_regs_vect + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 +.endm + .text .macro do_el2_call @@ -143,13 +167,19 @@ el1_error: b __guest_exit el2_sync: - /* Check for illegal exception return, otherwise panic */ + /* Check for illegal exception return */ mrs x0, spsr_el2 + tbnz x0, #20, 1f - /* if this was something else, then panic! */ - tst x0, #PSR_IL_BIT - b.eq __hyp_panic + save_caller_saved_regs_vect + stp x29, x30, [sp, #-16]! + bl kvm_unexpected_el2_exception + ldp x29, x30, [sp], #16 + restore_caller_saved_regs_vect + eret + +1: /* Let's attempt a recovery from the illegal exception return */ get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_IL @@ -157,27 +187,14 @@ el2_sync: el2_error: - ldp x0, x1, [sp], #16 + save_caller_saved_regs_vect + stp x29, x30, [sp, #-16]! + + bl kvm_unexpected_el2_exception + + ldp x29, x30, [sp], #16 + restore_caller_saved_regs_vect - /* - * Only two possibilities: - * 1) Either we come from the exit path, having just unmasked - * PSTATE.A: change the return code to an EL2 fault, and - * carry on, as we're already in a sane state to handle it. - * 2) Or we come from anywhere else, and that's a bug: we panic. - * - * For (1), x0 contains the original return code and x1 doesn't - * contain anything meaningful at that stage. We can reuse them - * as temp registers. - * For (2), who cares? - */ - mrs x0, elr_el2 - adr x1, abort_guest_exit_start - cmp x0, x1 - adr x1, abort_guest_exit_end - ccmp x0, x1, #4, ne - b.ne __hyp_panic - mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) eret sb diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h index 0297dc63988c0faa897234524a985ac334ceb025..5e28ea6aa097b026c0b61fd37f5be18ab34ba649 100644 --- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h @@ -21,70 +21,70 @@ #define save_debug(ptr,reg,nr) \ switch (nr) { \ case 15: ptr[15] = read_debug(reg, 15); \ - /* Fall through */ \ + fallthrough; \ case 14: ptr[14] = read_debug(reg, 14); \ - /* Fall through */ \ + fallthrough; \ case 13: ptr[13] = read_debug(reg, 13); \ - /* Fall through */ \ + fallthrough; \ case 12: ptr[12] = read_debug(reg, 12); \ - /* Fall through */ \ + fallthrough; \ case 11: ptr[11] = read_debug(reg, 11); \ - /* Fall through */ \ + fallthrough; \ case 10: ptr[10] = read_debug(reg, 10); \ - /* Fall through */ \ + fallthrough; \ case 9: ptr[9] = read_debug(reg, 9); \ - /* Fall through */ \ + fallthrough; \ case 8: ptr[8] = read_debug(reg, 8); \ - /* Fall through */ \ + fallthrough; \ case 7: ptr[7] = read_debug(reg, 7); \ - /* Fall through */ \ + fallthrough; \ case 6: ptr[6] = read_debug(reg, 6); \ - /* Fall through */ \ + fallthrough; \ case 5: ptr[5] = read_debug(reg, 5); \ - /* Fall through */ \ + fallthrough; \ case 4: ptr[4] = read_debug(reg, 4); \ - /* Fall through */ \ + fallthrough; \ case 3: ptr[3] = read_debug(reg, 3); \ - /* Fall through */ \ + fallthrough; \ case 2: ptr[2] = read_debug(reg, 2); \ - /* Fall through */ \ + fallthrough; \ case 1: ptr[1] = read_debug(reg, 1); \ - /* Fall through */ \ + fallthrough; \ default: ptr[0] = read_debug(reg, 0); \ } #define restore_debug(ptr,reg,nr) \ switch (nr) { \ case 15: write_debug(ptr[15], reg, 15); \ - /* Fall through */ \ + fallthrough; \ case 14: write_debug(ptr[14], reg, 14); \ - /* Fall through */ \ + 
fallthrough; \ case 13: write_debug(ptr[13], reg, 13); \ - /* Fall through */ \ + fallthrough; \ case 12: write_debug(ptr[12], reg, 12); \ - /* Fall through */ \ + fallthrough; \ case 11: write_debug(ptr[11], reg, 11); \ - /* Fall through */ \ + fallthrough; \ case 10: write_debug(ptr[10], reg, 10); \ - /* Fall through */ \ + fallthrough; \ case 9: write_debug(ptr[9], reg, 9); \ - /* Fall through */ \ + fallthrough; \ case 8: write_debug(ptr[8], reg, 8); \ - /* Fall through */ \ + fallthrough; \ case 7: write_debug(ptr[7], reg, 7); \ - /* Fall through */ \ + fallthrough; \ case 6: write_debug(ptr[6], reg, 6); \ - /* Fall through */ \ + fallthrough; \ case 5: write_debug(ptr[5], reg, 5); \ - /* Fall through */ \ + fallthrough; \ case 4: write_debug(ptr[4], reg, 4); \ - /* Fall through */ \ + fallthrough; \ case 3: write_debug(ptr[3], reg, 3); \ - /* Fall through */ \ + fallthrough; \ case 2: write_debug(ptr[2], reg, 2); \ - /* Fall through */ \ + fallthrough; \ case 1: write_debug(ptr[1], reg, 1); \ - /* Fall through */ \ + fallthrough; \ default: write_debug(ptr[0], reg, 0); \ } diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 426ef65601dd9c82cc71828012bd0ac110514e8b..5b6b8fa00f0af061156e0077caa9d60f335a5b09 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -29,6 +30,9 @@ extern const char __hyp_panic_string[]; +extern struct exception_table_entry __start___kvm_ex_table; +extern struct exception_table_entry __stop___kvm_ex_table; + /* Check whether the FP regs were dirtied while in the host-side run loop: */ static inline bool update_fp_enabled(struct kvm_vcpu *vcpu) { @@ -142,10 +146,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar) * saved the guest context yet, and we may return early... 
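The save_debug()/restore_debug() conversions above keep the deliberate descending-fallthrough shape that this series merely re-annotates: each case handles one register index and drops into the next lower one. A minimal C model of the idiom (hypothetical helper, not from the patch):

static void copy_first_n(u64 *dst, const u64 *src, int nr)
{
	switch (nr) {			// nr == highest index present
	case 3: dst[3] = src[3]; fallthrough;
	case 2: dst[2] = src[2]; fallthrough;
	case 1: dst[1] = src[1]; fallthrough;
	default: dst[0] = src[0];	// index 0 always exists
	}
}

The comment form matters here: comments are stripped before macro expansion, so -Wimplicit-fallthrough cannot see them at the expansion site, while the fallthrough pseudo-keyword survives as a real statement.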
*/ par = read_sysreg(par_el1); - asm volatile("at s1e1r, %0" : : "r" (far)); - isb(); - - tmp = read_sysreg(par_el1); + if (!__kvm_at("s1e1r", far)) + tmp = read_sysreg(par_el1); + else + tmp = SYS_PAR_EL1_F; /* back to the guest */ write_sysreg(par, par_el1); if (unlikely(tmp & SYS_PAR_EL1_F)) @@ -508,4 +512,31 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu) #endif } +static inline void __kvm_unexpected_el2_exception(void) +{ + unsigned long addr, fixup; + struct kvm_cpu_context *host_ctxt; + struct exception_table_entry *entry, *end; + unsigned long elr_el2 = read_sysreg(elr_el2); + + entry = hyp_symbol_addr(__start___kvm_ex_table); + end = hyp_symbol_addr(__stop___kvm_ex_table); + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; + + while (entry < end) { + addr = (unsigned long)&entry->insn + entry->insn; + fixup = (unsigned long)&entry->fixup + entry->fixup; + + if (addr != elr_el2) { + entry++; + continue; + } + + write_sysreg(fixup, elr_el2); + return; + } + + hyp_panic(host_ctxt); +} + #endif /* __ARM64_KVM_HYP_SWITCH_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 341be2f2f31261c36563b67d818d7f52b0c2ec05..0970442d2dbcf779c44d5f88155c65c3705712b8 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -270,3 +270,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) read_sysreg(hpfar_el2), par, vcpu); unreachable(); } + +asmlinkage void kvm_unexpected_el2_exception(void) +{ + return __kvm_unexpected_el2_exception(); +} diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 5a0073511efb65dbae5fdb9fd7bb9b689d09d054..452f4cacd674351d2adb514a5c2f1b1269e2934c 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -340,10 +340,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if) case 7: cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3); cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2); - /* Fall through */ + fallthrough; case 6: cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1); - /* Fall through */ + fallthrough; default: cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0); } @@ -352,10 +352,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if) case 7: cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3); cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2); - /* Fall through */ + fallthrough; case 6: cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1); - /* Fall through */ + fallthrough; default: cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0); } @@ -373,10 +373,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if) case 7: __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3); __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2); - /* Fall through */ + fallthrough; case 6: __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1); - /* Fall through */ + fallthrough; default: __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0); } @@ -385,10 +385,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if) case 7: __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3); __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2); - /* Fall through */ + fallthrough; case 6: __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1); - /* Fall through */ + fallthrough; default: __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0); } diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index c52d714e0d7592962011c709d7d728eb87d25375..c1da4f86ccacac9c65b0525265b2568f94217a57 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ 
b/arch/arm64/kvm/hyp/vhe/switch.c @@ -217,3 +217,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) __hyp_call_panic(spsr, elr, par, host_ctxt); unreachable(); } + +asmlinkage void kvm_unexpected_el2_exception(void) +{ + return __kvm_unexpected_el2_exception(); +} diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 0121ef2c7c8d2f720a9377257058bf090228811c..9a636b8064f1feae8d7cc00f18102957d76a46ff 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -343,7 +343,8 @@ static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd, * destroying the VM), otherwise another faulting VCPU may come in and mess * with things behind our backs. */ -static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size) +static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size, + bool may_block) { struct kvm *kvm = mmu->kvm; pgd_t *pgd; @@ -369,11 +370,16 @@ static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 si * If the range is too large, release the kvm->mmu_lock * to prevent starvation and lockup detector warnings. */ - if (next != end) + if (may_block && next != end) cond_resched_lock(&kvm->mmu_lock); } while (pgd++, addr = next, addr != end); } +static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size) +{ + __unmap_stage2_range(mmu, start, size, true); +} + static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd, phys_addr_t addr, phys_addr_t end) { @@ -1871,6 +1877,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) { force_pte = true; vma_pagesize = PAGE_SIZE; + vma_shift = PAGE_SHIFT; } /* @@ -1964,7 +1971,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, (fault_status == FSC_PERM && stage2_is_exec(mmu, fault_ipa, vma_pagesize)); - if (vma_pagesize == PUD_SIZE) { + /* + * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and + * all we have is a 2-level page table. Trying to map a PUD in + * this case would be fatally wrong. 
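This comment motivates the guard added just below: when the PUD level is folded, PUD_SIZE == PMD_SIZE, and what looks like a PUD-sized mapping must be handled by the PMD path instead. A hedged sketch of the compile-time effect (helper names hypothetical):

// On folded configurations the first comparison is false at compile
// time, so the PUD arm vanishes entirely and costs nothing at runtime.
if (PUD_SIZE != PMD_SIZE && size == PUD_SIZE)
	map_at_pud_level(pfn);
else if (size == PMD_SIZE)
	map_at_pmd_level(pfn);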
+ */ + if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) { pud_t new_pud = kvm_pfn_pud(pfn, mem_type); new_pud = kvm_pud_mkhuge(new_pud); @@ -2208,18 +2220,21 @@ static int handle_hva_to_gpa(struct kvm *kvm, static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) { - unmap_stage2_range(&kvm->arch.mmu, gpa, size); + unsigned flags = *(unsigned *)data; + bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE; + + __unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block); return 0; } int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end) + unsigned long start, unsigned long end, unsigned flags) { if (!kvm->arch.mmu.pgd) return 0; trace_kvm_unmap_hva_range(start, end); - handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); + handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags); return 0; } diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c index f7b52ce1557ec393635d1fb71aa4a76274627335..920ac43077ad3b48023c917bdd2eb8fb925e22dc 100644 --- a/arch/arm64/kvm/pvtime.c +++ b/arch/arm64/kvm/pvtime.c @@ -13,25 +13,22 @@ void kvm_update_stolen_time(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; - u64 steal; - __le64 steal_le; - u64 offset; - int idx; u64 base = vcpu->arch.steal.base; + u64 last_steal = vcpu->arch.steal.last_steal; + u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time); + u64 steal = 0; + int idx; if (base == GPA_INVALID) return; - /* Let's do the local bookkeeping */ - steal = vcpu->arch.steal.steal; - steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal; - vcpu->arch.steal.last_steal = current->sched_info.run_delay; - vcpu->arch.steal.steal = steal; - - steal_le = cpu_to_le64(steal); idx = srcu_read_lock(&kvm->srcu); - offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time); - kvm_put_guest(kvm, base + offset, steal_le, u64); + if (!kvm_get_guest(kvm, base + offset, steal)) { + steal = le64_to_cpu(steal); + vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay); + steal += vcpu->arch.steal.last_steal - last_steal; + kvm_put_guest(kvm, base + offset, cpu_to_le64(steal)); + } srcu_read_unlock(&kvm->srcu, idx); } @@ -43,7 +40,8 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu) switch (feature) { case ARM_SMCCC_HV_PV_TIME_FEATURES: case ARM_SMCCC_HV_PV_TIME_ST: - val = SMCCC_RET_SUCCESS; + if (vcpu->arch.steal.base != GPA_INVALID) + val = SMCCC_RET_SUCCESS; break; } @@ -64,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu) * Start counting stolen time from the time the guest requests * the feature enabled. 
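The kvm_update_stolen_time() rework above drops the host-side shadow copy in vcpu->arch.steal.steal and instead round-trips through guest memory: read the guest's little-endian counter, add the run_delay delta accumulated since the last update, write the sum back, and skip the update entirely if the read faults. Sketched with hypothetical guest accessors:

u64 delta = READ_ONCE(current->sched_info.run_delay) - last_steal;
__le64 cur;

if (!guest_read_u64(base + offset, &cur))	// may fault; then skip
	guest_write_u64(base + offset,
			cpu_to_le64(le64_to_cpu(cur) + delta));

Keeping the in-guest copy authoritative means a counter that userspace has rewritten, for example across live migration, is extended rather than clobbered with stale host state.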
*/ - vcpu->arch.steal.steal = 0; vcpu->arch.steal.last_steal = current->sched_info.run_delay; idx = srcu_read_lock(&kvm->srcu); @@ -74,7 +71,7 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu) return base; } -static bool kvm_arm_pvtime_supported(void) +bool kvm_arm_pvtime_supported(void) { return !!sched_info_on(); } diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h index 4691053c5ee426dc8601f009ac73e41972a8788a..ff0444352bba3eea5dfe8f1634a3d615a552b0f3 100644 --- a/arch/arm64/kvm/trace_arm.h +++ b/arch/arm64/kvm/trace_arm.h @@ -23,7 +23,7 @@ TRACE_EVENT(kvm_entry, __entry->vcpu_pc = vcpu_pc; ), - TP_printk("PC: 0x%08lx", __entry->vcpu_pc) + TP_printk("PC: 0x%016lx", __entry->vcpu_pc) ); TRACE_EVENT(kvm_exit, @@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit, __entry->vcpu_pc = vcpu_pc; ), - TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx", + TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%016lx", __print_symbolic(__entry->ret, kvm_arm_exception_type), __entry->esr_ec, __print_symbolic(__entry->esr_ec, kvm_arm_exception_class), @@ -69,7 +69,7 @@ TRACE_EVENT(kvm_guest_fault, __entry->ipa = ipa; ), - TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx", + TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#016lx", __entry->ipa, __entry->hsr, __entry->hxfar, __entry->vcpu_pc) ); @@ -131,7 +131,7 @@ TRACE_EVENT(kvm_mmio_emulate, __entry->cpsr = cpsr; ), - TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)", + TP_printk("Emulate MMIO at: 0x%016lx (instr: %08lx, cpsr: %08lx)", __entry->vcpu_pc, __entry->instr, __entry->cpsr) ); @@ -149,7 +149,7 @@ TRACE_EVENT(kvm_unmap_hva_range, __entry->end = end; ), - TP_printk("mmu notifier unmap range: %#08lx -- %#08lx", + TP_printk("mmu notifier unmap range: %#016lx -- %#016lx", __entry->start, __entry->end) ); @@ -165,7 +165,7 @@ TRACE_EVENT(kvm_set_spte_hva, __entry->hva = hva; ), - TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva) + TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva) ); TRACE_EVENT(kvm_age_hva, @@ -182,7 +182,7 @@ TRACE_EVENT(kvm_age_hva, __entry->end = end; ), - TP_printk("mmu notifier age hva: %#08lx -- %#08lx", + TP_printk("mmu notifier age hva: %#016lx -- %#016lx", __entry->start, __entry->end) ); @@ -198,7 +198,7 @@ TRACE_EVENT(kvm_test_age_hva, __entry->hva = hva; ), - TP_printk("mmu notifier test age hva: %#08lx", __entry->hva) + TP_printk("mmu notifier test age hva: %#016lx", __entry->hva) ); TRACE_EVENT(kvm_set_way_flush, diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h index 2c56d1e0f5bd543060ea99185409cee510dd6407..8d78acc4fba7efd0837ea396c751373b787dde87 100644 --- a/arch/arm64/kvm/trace_handle_exit.h +++ b/arch/arm64/kvm/trace_handle_exit.h @@ -22,7 +22,7 @@ TRACE_EVENT(kvm_wfx_arm64, __entry->is_wfe = is_wfe; ), - TP_printk("guest executed wf%c at: 0x%08lx", + TP_printk("guest executed wf%c at: 0x%016lx", __entry->is_wfe ? 
'e' : 'i', __entry->vcpu_pc) ); @@ -42,7 +42,7 @@ TRACE_EVENT(kvm_hvc_arm64, __entry->imm = imm; ), - TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)", + TP_printk("HVC at 0x%016lx (r0: 0x%016lx, imm: 0x%lx)", __entry->vcpu_pc, __entry->r0, __entry->imm) ); @@ -135,7 +135,7 @@ TRACE_EVENT(trap_reg, __entry->write_value = write_value; ), - TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value) + TP_printk("%s %s reg %d (0x%016llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value) ); TRACE_EVENT(kvm_handle_sys_reg, diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index a206655a39a52b5d5ff139006c8aad7ba8d13615..9b11c096a04230e90cf47e41c0b51f3b35ab6ce3 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -45,7 +45,7 @@ static u32 get_cpu_asid_bits(void) default: pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n", smp_processor_id(), fld); - /* Fallthrough */ + fallthrough; case 0: asid = 8; break; diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c index e456652facce5177b975ec466d4e743c086d600c..d05c78eace1bf7f6ca1dc5f5e689dd12e637b0e0 100644 --- a/arch/c6x/kernel/signal.c +++ b/arch/c6x/kernel/signal.c @@ -220,7 +220,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) regs->a4 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: do_restart: regs->a4 = regs->orig_a4; @@ -252,7 +252,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs, break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: regs->a4 = regs->orig_a4; regs->pc -= 4; diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c index 9452d6570b7e8126e51c8921cfe64244fd2fb4b1..970895df75ec3ea1186528c036e2a0965eb5a089 100644 --- a/arch/csky/kernel/signal.c +++ b/arch/csky/kernel/signal.c @@ -194,7 +194,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->a0 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: regs->a0 = regs->orig_a0; regs->pc -= TRAP0_SIZE; diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c index 38d335488a54bd42edb80a7b3611e3e9e08ef142..69e68949787febb37f24931a254f9489d876b2bf 100644 --- a/arch/h8300/kernel/signal.c +++ b/arch/h8300/kernel/signal.c @@ -227,7 +227,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka) regs->er0 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: do_restart: regs->er0 = regs->orig_er0; diff --git a/arch/hexagon/kernel/module.c b/arch/hexagon/kernel/module.c index cf99fb79a12411d3b8a901f74e1c6bdf7c2debe5..cb3bf19b064007442e6145fd209de2293a9e637d 100644 --- a/arch/hexagon/kernel/module.c +++ b/arch/hexagon/kernel/module.c @@ -120,7 +120,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, } case R_HEXAGON_HI16: value = (value>>16) & 0xffff; - /* fallthrough */ + fallthrough; case R_HEXAGON_LO16: *location &= ~0x00c03fff; *location |= value & 0x3fff; diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c index d48864c48e5ac49cf6210a35b793e4cfc710fa3c..94cc7ff52dce80eeceec6c1d57f22777704cc840 100644 --- a/arch/hexagon/kernel/signal.c +++ b/arch/hexagon/kernel/signal.c @@ -155,7 +155,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->r00 = -EINTR; break; } - /* Fall through */ + fallthrough; case -ERESTARTNOINTR: regs->r06 = regs->syscall_nr; pt_set_elr(regs, 
pt_elr(regs) - 4); diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 10850897a91c4dd1cf4cad798fbe3b8ae2a9c318..779b6972aa84b38186cacc00402ae7c0e22bfbc1 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -366,6 +366,15 @@ pgd_index (unsigned long address) } #define pgd_index pgd_index +/* + * In the kernel's mapped region we know everything is in region number 5, so + * as an optimisation its PGD already points to the area for that region. + * However, this also means that we cannot use pgd_index() and we must + * never add the region here. + */ +#define pgd_offset_k(addr) \ + (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))) + /* Look up a pgd entry in the gate area. On IA-64, the gate-area resides in the kernel-mapped segment, hence we use pgd_offset_k() here. */ diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index bec762a9b418d3a52d12a9ebe890ad103c8ba8fb..fec70d662d0c2da737308bfbdd74e45d6088a5d4 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -163,7 +163,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) case DIE_INIT_MONARCH_LEAVE: if (!kdump_freeze_monarch) break; - /* fall through */ + fallthrough; case DIE_INIT_SLAVE_LEAVE: case DIE_INIT_MONARCH_ENTER: case DIE_MCA_RENDZVOUS_LEAVE: diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c index b49fe6f618edb1dc78f2bd371998b56613b56ede..f8150ee74f2974f3dc7b98fcf84902e22de2a03d 100644 --- a/arch/ia64/kernel/ia64_ksyms.c +++ b/arch/ia64/kernel/ia64_ksyms.c @@ -3,7 +3,7 @@ * Architecture-specific kernel symbols */ -#ifdef CONFIG_VIRTUAL_MEM_MAP +#if defined(CONFIG_VIRTUAL_MEM_MAP) || defined(CONFIG_DISCONTIGMEM) #include #include #include diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index 1a42ba885188a5c8e865bb28108c685ed1a37515..00a496cb346f6ebd4400e2314022ecc6b13a5619 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -654,7 +654,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, } } else if (!is_internal(mod, val)) val = get_plt(mod, location, val, &ok); - /* FALL THROUGH */ + fallthrough; default: val -= bundle(location); break; diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 971f166873aa693b7d2a2fa5872c143f5bbea384..0dc3611e79715650af7a0471a64ba269993a8798 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -3472,7 +3472,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) break; case PFM_CTX_LOADED: if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break; - /* fall through */ + fallthrough; case PFM_CTX_UNLOADED: case PFM_CTX_ZOMBIE: DPRINT(("invalid state=%d\n", state)); diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index d07ed65c9c6e95481f5f8aa2cb4119a43875d960..e67b22fc3c60b7dbe47494b836a5cf5ea583ee5e 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -374,7 +374,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) /* note: scr->pt.r10 is already -1 */ break; } - /*FALLTHRU*/ + fallthrough; case ERESTARTNOINTR: ia64_decrement_ip(&scr->pt); restart = 0; /* don't restart twice if handle_signal() fails... 
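The signal-restart hunks in this run (c6x, csky, h8300, hexagon, ia64, with microblaze and m68k repeating it later) all annotate the same two-step pattern: -ERESTARTSYS only restarts when the handler was registered with SA_RESTART, then falls through into the unconditional -ERESTARTNOINTR path, which re-arms the syscall by restoring the clobbered argument register and rewinding the PC over the trap instruction. A generic sketch (register and constant names hypothetical):

case -ERESTARTSYS:
	if (!(ka->sa.sa_flags & SA_RESTART)) {
		regs->retval = -EINTR;		// give up, report interruption
		break;
	}
	fallthrough;
case -ERESTARTNOINTR:
	regs->arg0 = regs->orig_arg0;		// re-supply the syscall argument
	regs->pc -= TRAP_INSN_SIZE;		// re-execute the trap
	break;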
*/ diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 2d4e65ba5c3e9025bb0ccad498a0ee5f6624bccc..6c1a8951dfbb81aa76e5a0425170168ea23ed568 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -1431,7 +1431,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) if (u.insn.x) /* oops, really a semaphore op (cmpxchg, etc) */ goto failure; - /*FALLTHRU*/ + fallthrough; case LDS_IMM_OP: case LDSA_IMM_OP: case LDFS_OP: @@ -1459,7 +1459,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) if (u.insn.x) /* oops, really a semaphore op (cmpxchg, etc) */ goto failure; - /*FALLTHRU*/ + fallthrough; case LD_IMM_OP: case LDA_IMM_OP: case LDBIAS_IMM_OP: @@ -1475,7 +1475,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) if (u.insn.x) /* oops, really a semaphore op (cmpxchg, etc) */ goto failure; - /*FALLTHRU*/ + fallthrough; case ST_IMM_OP: case STREL_IMM_OP: ret = emulate_store_int(ifa, u.insn, regs); diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c index 7601fe0622d25e26ee083b317d159479df16ddf0..6bd64c35e691ae9e4a8c00182f436172d0a09f73 100644 --- a/arch/ia64/kernel/unwind.c +++ b/arch/ia64/kernel/unwind.c @@ -324,7 +324,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char return 0; } } - /* fall through */ + fallthrough; case UNW_NAT_NONE: dummy_nat = 0; nat_addr = &dummy_nat; diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c index 37091898adb3d3b54249df3f50558337d72ce751..5e0e682f9c61a60ef3a2370aaab7573d7690f99e 100644 --- a/arch/m68k/atari/atakeyb.c +++ b/arch/m68k/atari/atakeyb.c @@ -207,7 +207,7 @@ static irqreturn_t atari_keyboard_interrupt(int irq, void *dummy) self_test_last_rcv = jiffies; break; } - /* FALL THROUGH */ + fallthrough; default: break_flag = scancode & BREAK_MASK; diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index fc034fd19798eb783c15c2487cbb9a360c3da6d2..a98fca97707375aab338e9f06ab6e9611da97bdf 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -1067,7 +1067,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) regs->d0 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: do_restart: regs->d0 = regs->orig_d0; diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 5c9f3a2d6538821fcbbf9b3d492f73657d401bb6..a621fcc1a576ac06ddf2d1468615c7b44a7c00ab 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -1018,7 +1018,7 @@ int __init mac_platform_init(void) */ platform_device_register_simple("mac_scsi", 1, mac_scsi_duo_rsrc, ARRAY_SIZE(mac_scsi_duo_rsrc)); - /* fall through */ + fallthrough; case MAC_SCSI_OLD: /* Addresses from Developer Notes for Duo System, * PowerBook 180 & 160, 140 & 170, Macintosh IIsi diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index 1f0fad2a98a07dd437a1486fb18f09cc48558f55..ac77d73af19a59dec55410a784aa0982525828a0 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -370,7 +370,7 @@ void via_nubus_irq_startup(int irq) /* Allow NuBus slots 9 through F. 
*/ via2[vDirA] &= 0x80 | ~(1 << irq_idx); } - /* fall through */ + fallthrough; case MAC_VIA_IICI: via_irq_enable(irq); break; diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 795f483b105091a6c65942cda524a54375bd2d9a..ef46e77e97a5b35ad0315691af456ca90f1870fb 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -118,7 +118,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, pr_debug("do_page_fault: good_area\n"); switch (error_code & 3) { default: /* 3: write, present */ - /* fall through */ + fallthrough; case 2: /* write, not present */ if (!(vma->vm_flags & VM_WRITE)) goto acc_err; diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index 65bf5fd8d4733b63e68edb627dd8543899a2a0f8..4a96b59f0beed3835854c266d4ac4c39ee89ff04 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c @@ -249,7 +249,7 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) regs->r3 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: do_restart: /* offset of 4 bytes to re-execute trap (brki) instruction */ diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 0880a003573d9d3d1d0d8e14623fce8cf379524d..3344d4a1fe890c9af7fbb8ba62aaad4ac6d797a0 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -46,6 +46,9 @@ unsigned long memory_size; EXPORT_SYMBOL(memory_size); unsigned long lowmem_size; +EXPORT_SYMBOL(min_low_pfn); +EXPORT_SYMBOL(max_low_pfn); + #ifdef CONFIG_HIGHMEM pte_t *kmap_pte; EXPORT_SYMBOL(kmap_pte); diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h index 47a8ffc0b413edd02fc26f2a890501803b280247..f5b8300f45735c033c81b49d91330127b6a52803 100644 --- a/arch/mips/include/asm/irqflags.h +++ b/arch/mips/include/asm/irqflags.h @@ -137,6 +137,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) return !(flags & 1); } +static inline int arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + #endif /* #ifndef __ASSEMBLY__ */ /* diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index d35eaed1668f1a64f25f63c8d25c4e90cbba2f2f..825d337a505aefa5e5c4dda8a7f58098408c1ff5 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -969,7 +969,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, unsigned flags); int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h index b6e9c99b85a529661aaba3571150f1a306bcc4f2..eb181224eb4c4353926e6eb97a70fdd49b53ccac 100644 --- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h @@ -26,7 +26,6 @@ #define cpu_has_counter 1 #define cpu_has_dc_aliases (PAGE_SIZE < 0x4000) #define cpu_has_divec 0 -#define cpu_has_ejtag 0 #define cpu_has_inclusive_pcaches 1 #define cpu_has_llsc 1 #define cpu_has_mcheck 0 @@ -42,7 +41,6 @@ #define cpu_has_veic 0 #define cpu_has_vint 0 #define cpu_has_vtag_icache 0 -#define cpu_has_watch 1 #define 
cpu_has_wsbh 1 #define cpu_has_ic_fills_f_dc 1 #define cpu_hwrena_impl_bits 0xc0000000 diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h index f5e362f7970199533019237857846f050f7bafb8..bf24809231541644874229c21a3b244db38622a4 100644 --- a/arch/mips/include/asm/mach-loongson64/irq.h +++ b/arch/mips/include/asm/mach-loongson64/irq.h @@ -2,8 +2,6 @@ #ifndef __ASM_MACH_LOONGSON64_IRQ_H_ #define __ASM_MACH_LOONGSON64_IRQ_H_ -#include - /* cpu core interrupt numbers */ #define NR_IRQS_LEGACY 16 #define NR_MIPS_CPU_IRQS 8 diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h index 3a25dbd3b3e99ea6075ec1e2b10de1c8ea58bdc1..5eaca4fe3f924f96330c4e44fa5e1a675f12e2b1 100644 --- a/arch/mips/include/asm/mach-loongson64/mmzone.h +++ b/arch/mips/include/asm/mach-loongson64/mmzone.h @@ -9,7 +9,6 @@ #ifndef _ASM_MACH_LOONGSON64_MMZONE_H #define _ASM_MACH_LOONGSON64_MMZONE_H -#include #define NODE_ADDRSPACE_SHIFT 44 #define NODE0_ADDRSPACE_OFFSET 0x000000000000UL #define NODE1_ADDRSPACE_OFFSET 0x100000000000UL diff --git a/arch/mips/include/asm/unroll.h b/arch/mips/include/asm/unroll.h index 7dd4a80e05d6ddef8d132d09acad6251210c92cf..6f4ac854b12d28911e409ee2df9c8c17a293e958 100644 --- a/arch/mips/include/asm/unroll.h +++ b/arch/mips/include/asm/unroll.h @@ -28,38 +28,38 @@ BUILD_BUG_ON(!__builtin_constant_p(times)); \ \ switch (times) { \ - case 32: fn(__VA_ARGS__); /* fall through */ \ - case 31: fn(__VA_ARGS__); /* fall through */ \ - case 30: fn(__VA_ARGS__); /* fall through */ \ - case 29: fn(__VA_ARGS__); /* fall through */ \ - case 28: fn(__VA_ARGS__); /* fall through */ \ - case 27: fn(__VA_ARGS__); /* fall through */ \ - case 26: fn(__VA_ARGS__); /* fall through */ \ - case 25: fn(__VA_ARGS__); /* fall through */ \ - case 24: fn(__VA_ARGS__); /* fall through */ \ - case 23: fn(__VA_ARGS__); /* fall through */ \ - case 22: fn(__VA_ARGS__); /* fall through */ \ - case 21: fn(__VA_ARGS__); /* fall through */ \ - case 20: fn(__VA_ARGS__); /* fall through */ \ - case 19: fn(__VA_ARGS__); /* fall through */ \ - case 18: fn(__VA_ARGS__); /* fall through */ \ - case 17: fn(__VA_ARGS__); /* fall through */ \ - case 16: fn(__VA_ARGS__); /* fall through */ \ - case 15: fn(__VA_ARGS__); /* fall through */ \ - case 14: fn(__VA_ARGS__); /* fall through */ \ - case 13: fn(__VA_ARGS__); /* fall through */ \ - case 12: fn(__VA_ARGS__); /* fall through */ \ - case 11: fn(__VA_ARGS__); /* fall through */ \ - case 10: fn(__VA_ARGS__); /* fall through */ \ - case 9: fn(__VA_ARGS__); /* fall through */ \ - case 8: fn(__VA_ARGS__); /* fall through */ \ - case 7: fn(__VA_ARGS__); /* fall through */ \ - case 6: fn(__VA_ARGS__); /* fall through */ \ - case 5: fn(__VA_ARGS__); /* fall through */ \ - case 4: fn(__VA_ARGS__); /* fall through */ \ - case 3: fn(__VA_ARGS__); /* fall through */ \ - case 2: fn(__VA_ARGS__); /* fall through */ \ - case 1: fn(__VA_ARGS__); /* fall through */ \ + case 32: fn(__VA_ARGS__); fallthrough; \ + case 31: fn(__VA_ARGS__); fallthrough; \ + case 30: fn(__VA_ARGS__); fallthrough; \ + case 29: fn(__VA_ARGS__); fallthrough; \ + case 28: fn(__VA_ARGS__); fallthrough; \ + case 27: fn(__VA_ARGS__); fallthrough; \ + case 26: fn(__VA_ARGS__); fallthrough; \ + case 25: fn(__VA_ARGS__); fallthrough; \ + case 24: fn(__VA_ARGS__); fallthrough; \ + case 23: fn(__VA_ARGS__); fallthrough; \ + case 22: fn(__VA_ARGS__); fallthrough; \ + case 21: fn(__VA_ARGS__); fallthrough; \ + case 20: fn(__VA_ARGS__); 
fallthrough; \ + case 19: fn(__VA_ARGS__); fallthrough; \ + case 18: fn(__VA_ARGS__); fallthrough; \ + case 17: fn(__VA_ARGS__); fallthrough; \ + case 16: fn(__VA_ARGS__); fallthrough; \ + case 15: fn(__VA_ARGS__); fallthrough; \ + case 14: fn(__VA_ARGS__); fallthrough; \ + case 13: fn(__VA_ARGS__); fallthrough; \ + case 12: fn(__VA_ARGS__); fallthrough; \ + case 11: fn(__VA_ARGS__); fallthrough; \ + case 10: fn(__VA_ARGS__); fallthrough; \ + case 9: fn(__VA_ARGS__); fallthrough; \ + case 8: fn(__VA_ARGS__); fallthrough; \ + case 7: fn(__VA_ARGS__); fallthrough; \ + case 6: fn(__VA_ARGS__); fallthrough; \ + case 5: fn(__VA_ARGS__); fallthrough; \ + case 4: fn(__VA_ARGS__); fallthrough; \ + case 3: fn(__VA_ARGS__); fallthrough; \ + case 2: fn(__VA_ARGS__); fallthrough; \ + case 1: fn(__VA_ARGS__); fallthrough; \ case 0: break; \ \ default: \ diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index efce5defcc5cfd1548d66c52ab8b7460e3e40237..011eb6bbf81a5ef8e1b62874c8e64e6bf510deae 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -1898,8 +1898,8 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) (base_id >= 64 && base_id < 90) || (base_id >= 128 && base_id < 164) || (base_id >= 192 && base_id < 200) || - (base_id >= 256 && base_id < 274) || - (base_id >= 320 && base_id < 358) || + (base_id >= 256 && base_id < 275) || + (base_id >= 320 && base_id < 361) || (base_id >= 384 && base_id < 574)) break; diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 2f513506a3d527403d698d191d91826e6dbe360f..1dbfb5aadffd6422b0a04d30cad818350b780512 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -239,6 +239,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle) */ static void bmips_init_secondary(void) { + bmips_cpu_setup(); + switch (current_cpu_type()) { case CPU_BMIPS4350: case CPU_BMIPS4380: diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 38aa07ccdbcc25758ac931f8909a22971a7f3b8f..cf788591f0911271490e999c34f21b3f100edf2f 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1287,6 +1287,18 @@ static int enable_restore_fp_context(int msa) err = own_fpu_inatomic(1); if (msa && !err) { enable_msa(); + /* + * With MSA enabled, userspace can see MSACSR + * and the MSA regs, but they may still hold + * values left by a previous task; restore them + * from the saved fp/msa context + */ + write_msa_csr(current->thread.fpu.msacsr); + /* + * own_fpu_inatomic(1) only restores the low + * 64 bits; fix up the high 64 bits here + */ + init_msa_upper(); set_thread_flag(TIF_USEDMSA); set_thread_flag(TIF_MSA_CTX_LIVE); } diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 7de85d2253ff5c436446e46358b8d7e7d5688710..0c50ac4442221f07814ed38d0e07d7dd67c8a2bc 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -137,6 +137,8 @@ extern void kvm_init_loongson_ipi(struct kvm *kvm); int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { switch (type) { + case KVM_VM_MIPS_AUTO: + break; #ifdef CONFIG_KVM_MIPS_VZ case KVM_VM_MIPS_VZ: #else diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 87fa8d8a10310740de792aad3461d9e7af5836d5..28c366d307e702719abe6409c9aceb43e7f1bbcb 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -486,7 +486,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, return 1; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long
start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index fc5a6d25f74ff42040e4dc9e9c655baaf1596869..0ef717093262fd3892c4e6595e99a9de3ce6f796 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1712,7 +1712,11 @@ static void setup_scache(void) printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n", scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); + + if (current_cpu_type() == CPU_BMIPS5000) + c->options |= MIPS_CPU_INCLUSIVE_CACHES; } + #else if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT)) panic("Dunno how to handle MIPS32 / MIPS64 second level cache"); diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c index 1493c49ca47a1933e86b4a87871ce244e0df22a1..55d7b7fd18b6fba66ef1d2c4b9d46e637fbc3cf3 100644 --- a/arch/mips/oprofile/op_model_mipsxx.c +++ b/arch/mips/oprofile/op_model_mipsxx.c @@ -245,7 +245,6 @@ static int mipsxx_perfcount_handler(void) switch (counters) { #define HANDLE_COUNTER(n) \ - fallthrough; \ case n + 1: \ control = r_c0_perfctrl ## n(); \ counter = r_c0_perfcntr ## n(); \ @@ -256,8 +255,11 @@ static int mipsxx_perfcount_handler(void) handled = IRQ_HANDLED; \ } HANDLE_COUNTER(3) + fallthrough; HANDLE_COUNTER(2) + fallthrough; HANDLE_COUNTER(1) + fallthrough; HANDLE_COUNTER(0) } diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c index 0ecffb65fd6d1adbce4cbc324a42aabe8f91f088..b09dc844985a8978bac1cdb114ce54a1ad3524fe 100644 --- a/arch/mips/sni/a20r.c +++ b/arch/mips/sni/a20r.c @@ -222,8 +222,8 @@ void __init sni_a20r_irq_init(void) irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq); sni_hwint = a20r_hwint; change_c0_status(ST0_IM, IE_IRQ0); - if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler, 0, "ISA", - NULL)) + if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler, + IRQF_SHARED, "ISA", sni_isa_irq_handler)) pr_err("Failed to register ISA interrupt\n"); } diff --git a/arch/nds32/include/asm/irqflags.h b/arch/nds32/include/asm/irqflags.h index fb45ec46bb1b2e7be861a236a3773c090ca65b1e..51ef800bb3018b17449089ad7335875b97aba160 100644 --- a/arch/nds32/include/asm/irqflags.h +++ b/arch/nds32/include/asm/irqflags.h @@ -34,3 +34,8 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) { return !flags; } + +static inline int arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c index 62bdafbc53f4c8183a3412ffbf6a5065346db92b..9edd7ed7d7bf849e6d32b8036f6d6b5f1b0f37a5 100644 --- a/arch/nds32/kernel/fpu.c +++ b/arch/nds32/kernel/fpu.c @@ -45,7 +45,7 @@ void save_fpu(struct task_struct *tsk) : /* no output */ : "r" (&tsk->thread.fpu) : "memory"); - /* fall through */ + fallthrough; case SP32_DP16_reg: asm volatile ("fsdi $fd15, [%0+0x78]\n\t" "fsdi $fd14, [%0+0x70]\n\t" @@ -58,7 +58,7 @@ void save_fpu(struct task_struct *tsk) : /* no output */ : "r" (&tsk->thread.fpu) : "memory"); - /* fall through */ + fallthrough; case SP16_DP8_reg: asm volatile ("fsdi $fd7, [%0+0x38]\n\t" "fsdi $fd6, [%0+0x30]\n\t" @@ -67,7 +67,7 @@ void save_fpu(struct task_struct *tsk) : /* no output */ : "r" (&tsk->thread.fpu) : "memory"); - /* fall through */ + fallthrough; case SP8_DP4_reg: asm volatile ("fsdi $fd3, [%1+0x18]\n\t" "fsdi $fd2, [%1+0x10]\n\t" @@ -108,7 +108,7 @@ void load_fpu(const struct 
fpu_struct *fpregs) "fldi $fd16, [%0+0x80]\n\t" : /* no output */ : "r" (fpregs)); - /* fall through */ + fallthrough; case SP32_DP16_reg: asm volatile ("fldi $fd15, [%0+0x78]\n\t" "fldi $fd14, [%0+0x70]\n\t" @@ -120,7 +120,7 @@ void load_fpu(const struct fpu_struct *fpregs) "fldi $fd8, [%0+0x40]\n\t" : /* no output */ : "r" (fpregs)); - /* fall through */ + fallthrough; case SP16_DP8_reg: asm volatile ("fldi $fd7, [%0+0x38]\n\t" "fldi $fd6, [%0+0x30]\n\t" @@ -128,7 +128,7 @@ void load_fpu(const struct fpu_struct *fpregs) "fldi $fd4, [%0+0x20]\n\t" : /* no output */ : "r" (fpregs)); - /* fall through */ + fallthrough; case SP8_DP4_reg: asm volatile ("fldi $fd3, [%1+0x18]\n\t" "fldi $fd2, [%1+0x10]\n\t" diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c index 330b19fcd990317d7c34e5a986f677f54e3f2851..36e25a410bb0ec27eb4971d191ba6a0ee2846e53 100644 --- a/arch/nds32/kernel/signal.c +++ b/arch/nds32/kernel/signal.c @@ -316,7 +316,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->uregs[0] = -EINTR; break; } - /* Else, fall through */ + fallthrough; case -ERESTARTNOINTR: regs->uregs[0] = regs->orig_r0; regs->ipc -= 4; @@ -361,7 +361,7 @@ static void do_signal(struct pt_regs *regs) switch (regs->uregs[0]) { case -ERESTART_RESTARTBLOCK: regs->uregs[15] = __NR_restart_syscall; - /* Fall through */ + fallthrough; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index f0390211236bfae0f5b438b73e8a3e9cbbdd1454..120f5005461b812466cf351ea4b3de5c3efb9ac9 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -165,19 +165,19 @@ struct __large_struct { #define __get_user_nocheck(x, ptr, size) \ ({ \ - long __gu_err, __gu_val; \ - __get_user_size(__gu_val, (ptr), (size), __gu_err); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + long __gu_err; \ + __get_user_size((x), (ptr), (size), __gu_err); \ __gu_err; \ }) #define __get_user_check(x, ptr, size) \ ({ \ - long __gu_err = -EFAULT, __gu_val = 0; \ + long __gu_err = -EFAULT; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - if (access_ok(__gu_addr, size)) \ - __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + if (access_ok(__gu_addr, size)) \ + __get_user_size((x), __gu_addr, (size), __gu_err); \ + else \ + (x) = (__typeof__(*(ptr))) 0; \ __gu_err; \ }) @@ -191,11 +191,13 @@ do { \ case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \ case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \ case 8: __get_user_asm2(x, ptr, retval); break; \ - default: (x) = __get_user_bad(); \ + default: (x) = (__typeof__(*(ptr)))__get_user_bad(); \ } \ } while (0) #define __get_user_asm(x, addr, err, op) \ +{ \ + unsigned long __gu_tmp; \ __asm__ __volatile__( \ "1: "op" %1,0(%2)\n" \ "2:\n" \ @@ -209,10 +211,14 @@ do { \ " .align 2\n" \ " .long 1b,3b\n" \ ".previous" \ - : "=r"(err), "=r"(x) \ - : "r"(addr), "i"(-EFAULT), "0"(err)) + : "=r"(err), "=r"(__gu_tmp) \ + : "r"(addr), "i"(-EFAULT), "0"(err)); \ + (x) = (__typeof__(*(addr)))__gu_tmp; \ +} #define __get_user_asm2(x, addr, err) \ +{ \ + unsigned long long __gu_tmp; \ __asm__ __volatile__( \ "1: l.lwz %1,0(%2)\n" \ "2: l.lwz %H1,4(%2)\n" \ @@ -229,8 +235,11 @@ do { \ " .long 1b,4b\n" \ " .long 2b,4b\n" \ ".previous" \ - : "=r"(err), "=&r"(x) \ - : "r"(addr), "i"(-EFAULT), "0"(err)) + : "=r"(err), "=&r"(__gu_tmp) \ + : "r"(addr), "i"(-EFAULT), "0"(err)); \ + (x) 
= (__typeof__(*(addr)))( \ + (__typeof__((x)-(x)))__gu_tmp); \ +} /* more complex routines */ diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c index b18e775f8be3b101ef2b204914771daaebb5f5b0..13c87f1f872b44b4152f8c3e5745f0c525455eed 100644 --- a/arch/openrisc/kernel/setup.c +++ b/arch/openrisc/kernel/setup.c @@ -80,6 +80,16 @@ static void __init setup_memory(void) */ memblock_reserve(__pa(_stext), _end - _stext); +#ifdef CONFIG_BLK_DEV_INITRD + /* Then reserve the initrd, if any */ + if (initrd_start && (initrd_end > initrd_start)) { + unsigned long aligned_start = ALIGN_DOWN(initrd_start, PAGE_SIZE); + unsigned long aligned_end = ALIGN(initrd_end, PAGE_SIZE); + + memblock_reserve(__pa(aligned_start), aligned_end - aligned_start); + } +#endif /* CONFIG_BLK_DEV_INITRD */ + early_init_fdt_reserve_self(); early_init_fdt_scan_reserved_mem(); diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index 97804f21a40c832a61d646cf4cefddd17746f31a..c779364f0cd09bf1b1aa57c188711498b9831a2e 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -244,7 +244,7 @@ int do_signal(struct pt_regs *regs, int syscall) switch (retval) { case -ERESTART_RESTARTBLOCK: restart = -2; - /* Fall through */ + fallthrough; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c index 08f56af387ac4b12700ee26e993966b7dc700158..534a52ec5e66780fc73dcdfcd15b9c0929319c0c 100644 --- a/arch/openrisc/mm/cache.c +++ b/arch/openrisc/mm/cache.c @@ -16,7 +16,7 @@ #include #include -static void cache_loop(struct page *page, const unsigned int reg) +static __always_inline void cache_loop(struct page *page, const unsigned int reg) { unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT; unsigned long line = paddr & ~(L1_CACHE_BYTES - 1); diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 5df5d4cd5d4cf0f08da843c4039f8920cfbafef3..3c037fc960387bb7b5cad81b2c4c450a8dcaae9f 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -502,7 +502,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) regs->gr[28] = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: check_syscallno_in_delay_branch(regs); break; diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 43875c289723ea9f6669e5e5eb67d59ac6f64d12..a52c7abf2ca49284ce495eda92270e7aefada485 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -437,7 +437,6 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o break; default: - /* Fall through */ break; } @@ -644,12 +643,12 @@ void notrace handle_interruption(int code, struct pt_regs *regs) case 15: /* Data TLB miss fault/Data page fault */ - /* Fall through */ + fallthrough; case 16: /* Non-access instruction TLB miss fault */ /* The instruction TLB entry needed for the target address of the FIC is absent, and hardware can't find it, so we get to cleanup */ - /* Fall through */ + fallthrough; case 17: /* Non-access data TLB miss fault/Non-access data page fault */ /* FIXME: @@ -673,7 +672,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) handle_unaligned(regs); return; } - /* Fall Through */ + fallthrough; case 26: /* PCXL: Data memory access rights trap */ fault_address = regs->ior; @@ -683,7 +682,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) case 19: /* Data memory break trap */ regs->gr[0] |= PSW_X; /* 
So we can single-step over the trap */ - /* fall thru */ + fallthrough; case 21: /* Page reference trap */ handle_gdb_break(regs, TRAP_HWBKPT); @@ -730,7 +729,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) } mmap_read_unlock(current->mm); } - /* Fall Through */ + fallthrough; case 27: /* Data memory protection ID trap */ if (code == 27 && !user_mode(regs) && diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 4bfe2da9fbe3a4f9cc81a2788eb9394d8868d088..716960f5d92ea460a4b1084954e7bb69ac6cc476 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -67,7 +67,7 @@ parisc_acctyp(unsigned long code, unsigned int inst) case 0x30000000: /* coproc2 */ if (bit22set(inst)) return VM_WRITE; - /* fall through */ + fallthrough; case 0x0: /* indexed/memory management */ if (bit22set(inst)) { @@ -370,7 +370,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, } /* probably address is outside of mapped file */ - /* fall through */ + fallthrough; case 17: /* NA data TLB miss / page fault */ case 18: /* Unaligned access - PCXS only */ signo = SIGBUS; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1f48bbfb3ce99dc9eccbee3612689e88be4ae675..65bed1fdeaad71e7bd7c3fdeecdf41f019b4b71e 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -860,6 +860,18 @@ config PPC_SUBPAGE_PROT If unsure, say N here. +config PPC_PROT_SAO_LPAR + bool "Support PROT_SAO mappings in LPARs" + depends on PPC_BOOK3S_64 + help + This option adds support for PROT_SAO mappings from userspace + inside LPARs on supported CPUs. + + This may cause issues when performing guest migration from + a CPU that supports SAO to one that does not. + + If unsure, say N here. + config PPC_COPRO_BASE bool diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 6de56c3b33c4217c40805ac4e3c76f84fc0c0e47..495fc0ccb45318404567b474d44528824477ff33 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -20,13 +20,9 @@ #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE) #define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) #define _PAGE_PRIVILEGED 0x00008 /* kernel access only */ - -#define _PAGE_CACHE_CTL 0x00030 /* Bits for the following cache modes */ - /* No bits set is normal cacheable memory */ - /* 0x00010 unused, is SAO bit on radix POWER9 */ +#define _PAGE_SAO 0x00010 /* Strong access order */ #define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */ #define _PAGE_TOLERANT 0x00030 /* tolerant memory, cache inhibited */ - #define _PAGE_DIRTY 0x00080 /* C: page changed */ #define _PAGE_ACCESSED 0x00100 /* R: page referenced */ /* @@ -828,6 +824,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, return hash__set_pte_at(mm, addr, ptep, pte, percpu); } +#define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT) + #define pgprot_noncached pgprot_noncached static inline pgprot_t pgprot_noncached(pgprot_t prot) { diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index fdddb822d564b0267403e4cdc06b7536c65e4f2b..32a15dc49e8ca95ba096b63bf4945ca1b93633de 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -9,6 +9,11 @@ #ifndef __ASSEMBLY__ +/* + * Added to include __machine_check_early_realmode_* functions + */ +#include + /* This structure can grow, its real size is used by head.S code * via the mkdefs mechanism.
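The powerpc hunks above revert the PROT_SAO removal: _PAGE_SAO regains its PTE encoding, CPU_FTR_SAO returns to the POWER feature masks below, and the new PPC_PROT_SAO_LPAR option gates use inside LPARs because of the migration caveat in its help text. From userspace the attribute is requested per mapping; a hedged illustration (demo helper is hypothetical, not part of the patch):

#include <sys/mman.h>

int request_sao_mapping(void **out)
{
	// PROT_SAO (0x10) is powerpc's Strong Access Ordering bit from the
	// uapi asm/mman.h; the restored arch_validate_prot() further down
	// rejects it when CPU_FTR_SAO is absent or the LPAR option is off.
	*out = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_SAO,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return *out == MAP_FAILED ? -1 : 0;
}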
*/ @@ -191,7 +196,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000) #define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000) #define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000) -// Free LONG_ASM_CONST(0x0000000008000000) +#define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000) #define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000) #define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000) #define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000) @@ -436,7 +441,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \ + CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | \ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX ) @@ -445,7 +450,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | \ + CPU_FTR_DSCR | CPU_FTR_SAO | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ @@ -456,7 +461,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | \ + CPU_FTR_DSCR | CPU_FTR_SAO | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ @@ -474,7 +479,7 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ - CPU_FTR_DSCR | \ + CPU_FTR_DSCR | CPU_FTR_SAO | \ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 925cf89cbf4bac3617f921eecb8c1b36d895a818..6bfc87915d5dbaeafcc63a3cd8f3e74ba7f0d986 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -52,7 +52,7 @@ enum fixed_addresses { FIX_HOLE, /* reserve the top 128K for early debugging purposes */ FIX_EARLY_DEBUG_TOP = FIX_HOLE, - FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128, PAGE_SIZE)/PAGE_SIZE)-1, + FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1, #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 3a0db7b0b46efcd9780ed23a8ec7a9852ec18cdc..35060be090732d7cc0bd5b9906df4602a8d2ffea 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -200,17 +200,14 @@ static inline bool arch_irqs_disabled(void) #define powerpc_local_irq_pmu_save(flags) \ do { \ raw_local_irq_pmu_save(flags); \ - trace_hardirqs_off(); \ + if (!raw_irqs_disabled_flags(flags)) \ + trace_hardirqs_off(); \ } while(0) #define powerpc_local_irq_pmu_restore(flags) \ do { \ - if (raw_irqs_disabled_flags(flags)) { \ - raw_local_irq_pmu_restore(flags); \ - 
trace_hardirqs_off(); \ - } else { \ + if (!raw_irqs_disabled_flags(flags)) \ trace_hardirqs_on(); \ - raw_local_irq_pmu_restore(flags); \ - } \ + raw_local_irq_pmu_restore(flags); \ } while(0) #else #define powerpc_local_irq_pmu_save(flags) \ diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h index d635b96c7ea69bbb124918721f424002f63d79f2..7355ed05e65edab1965a960ebecefb1513b77c8b 100644 --- a/arch/powerpc/include/asm/kasan.h +++ b/arch/powerpc/include/asm/kasan.h @@ -15,11 +15,18 @@ #ifndef __ASSEMBLY__ #include +#include #define KASAN_SHADOW_SCALE_SHIFT 3 +#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_MODULES) && defined(CONFIG_STRICT_KERNEL_RWX) +#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M) +#else +#define KASAN_KERN_START PAGE_OFFSET +#endif + #define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \ - (PAGE_OFFSET >> KASAN_SHADOW_SCALE_SHIFT)) + (KASAN_KERN_START >> KASAN_SHADOW_SCALE_SHIFT)) #define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index e020d269416d633f45b3fb19e8670f3728dbd4dd..10ded83414dea0269bb891a27a11b9886da600e9 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -58,7 +58,8 @@ #define KVM_ARCH_WANT_MMU_NOTIFIER extern int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, + unsigned flags); extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h index adf2cda67f9a40ddb02ccbf2225cbc6fc3cf090f..89aa8248a57ddd0c527dc3f6b6cb2ce9ed371292 100644 --- a/arch/powerpc/include/asm/mce.h +++ b/arch/powerpc/include/asm/mce.h @@ -210,6 +210,9 @@ struct mce_error_info { #define MCE_EVENT_RELEASE true #define MCE_EVENT_DONTRELEASE false +struct pt_regs; +struct notifier_block; + extern void save_mce_event(struct pt_regs *regs, long handled, struct mce_error_info *mce_err, uint64_t nip, uint64_t addr, uint64_t phys_addr); @@ -225,5 +228,9 @@ int mce_register_notifier(struct notifier_block *nb); int mce_unregister_notifier(struct notifier_block *nb); #ifdef CONFIG_PPC_BOOK3S_64 void flush_and_reload_slb(void); +long __machine_check_early_realmode_p7(struct pt_regs *regs); +long __machine_check_early_realmode_p8(struct pt_regs *regs); +long __machine_check_early_realmode_p9(struct pt_regs *regs); +long __machine_check_early_realmode_p10(struct pt_regs *regs); #endif /* CONFIG_PPC_BOOK3S_64 */ #endif /* __ASM_PPC64_MCE_H__ */ diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index 7c07728af300b8b7a53fe75ff153886e3ba99d65..7cb6d18f5cd6d5f0f8e5b9971fd64ab5b34e2bb1 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -13,20 +13,43 @@ #include #include -#ifdef CONFIG_PPC_MEM_KEYS static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, unsigned long pkey) { - return pkey_to_vmflag_bits(pkey); +#ifdef CONFIG_PPC_MEM_KEYS + return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey)); +#else + return ((prot & PROT_SAO) ? 
VM_SAO : 0); +#endif } #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) { - return __pgprot(vmflag_to_pte_pkey_bits(vm_flags)); +#ifdef CONFIG_PPC_MEM_KEYS + return (vm_flags & VM_SAO) ? + __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) : + __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags)); +#else + return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0); +#endif } #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) -#endif + +static inline bool arch_validate_prot(unsigned long prot, unsigned long addr) +{ + if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO)) + return false; + if (prot & PROT_SAO) { + if (!cpu_has_feature(CPU_FTR_SAO)) + return false; + if (firmware_has_feature(FW_FEATURE_LPAR) && + !IS_ENABLED(CONFIG_PPC_PROT_SAO_LPAR)) + return false; + } + return true; +} +#define arch_validate_prot arch_validate_prot #endif /* CONFIG_PPC64 */ #endif /* _ASM_POWERPC_MMAN_H */ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 59ee9fa4ae09d2b498b6c2d16d45f708a4283ebe..6cb8aa35719176a297218f94e6ef089351e5bd77 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -82,6 +82,8 @@ */ #include +#define _PAGE_SAO 0 + #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) /* diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h index 1e8b2e1ec1db6ac72fe0b174c82a18364a266cc2..daec64d41b44781576f7369601fb551622912386 100644 --- a/arch/powerpc/include/asm/perf_event.h +++ b/arch/powerpc/include/asm/perf_event.h @@ -40,4 +40,7 @@ static inline bool is_sier_available(void) { return false; } /* To support perf_regs sier update */ extern bool is_sier_available(void); +/* To define perf extended regs mask value */ +extern u64 PERF_REG_EXTENDED_MASK; +#define PERF_REG_EXTENDED_MASK PERF_REG_EXTENDED_MASK #endif diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 86c9eb064b22b4a636f94cf71910d03b8fdce4d3..f6acabb6c9be8081d8d5cba42b76fa1f88723dcf 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -62,6 +62,11 @@ struct power_pmu { int *blacklist_ev; /* BHRB entries in the PMU */ int bhrb_nr; + /* + * set this flag with `PERF_PMU_CAP_EXTENDED_REGS` if + * the pmu supports extended perf regs capability + */ + int capabilities; }; /* diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h index 3a700351fecafbf7f1b26d574cd65852dd6156f3..c0c737215b00bdc70def55a5b35aa8216e456db0 100644 --- a/arch/powerpc/include/uapi/asm/mman.h +++ b/arch/powerpc/include/uapi/asm/mman.h @@ -11,7 +11,7 @@ #include -#define PROT_SAO 0x10 /* Unsupported since v5.9 */ +#define PROT_SAO 0x10 /* Strong Access Ordering */ #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ #define MAP_NORESERVE 0x40 /* don't reserve swap pages */ diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h index f599064dd8dc8431fbe705320d9afc7f862f1207..bdf5f10f8b9f50c58e7fa618e1ab64f7beb6e095 100644 --- a/arch/powerpc/include/uapi/asm/perf_regs.h +++ b/arch/powerpc/include/uapi/asm/perf_regs.h @@ -48,6 +48,24 @@ enum perf_event_powerpc_regs { PERF_REG_POWERPC_DSISR, PERF_REG_POWERPC_SIER, PERF_REG_POWERPC_MMCRA, - PERF_REG_POWERPC_MAX, + /* Extended registers 
*/ + PERF_REG_POWERPC_MMCR0, + PERF_REG_POWERPC_MMCR1, + PERF_REG_POWERPC_MMCR2, + PERF_REG_POWERPC_MMCR3, + PERF_REG_POWERPC_SIER2, + PERF_REG_POWERPC_SIER3, + /* Max regs without the extended regs */ + PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1, }; + +#define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1) + +/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300 */ +#define PERF_REG_PMU_MASK_300 (((1ULL << (PERF_REG_POWERPC_MMCR2 + 1)) - 1) - PERF_REG_PMU_MASK) +/* PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31 */ +#define PERF_REG_PMU_MASK_31 (((1ULL << (PERF_REG_POWERPC_SIER3 + 1)) - 1) - PERF_REG_PMU_MASK) + +#define PERF_REG_MAX_ISA_300 (PERF_REG_POWERPC_MMCR2 + 1) +#define PERF_REG_MAX_ISA_31 (PERF_REG_POWERPC_SIER3 + 1) #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 3d406a9626e86ed68e9e0e567e306347efdd982c..2aa89c6b289679a501737b4569449d167c3eed5b 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -72,9 +72,6 @@ extern void __setup_cpu_power9(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_power9(void); extern void __setup_cpu_power10(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_power10(void); -extern long __machine_check_early_realmode_p7(struct pt_regs *regs); -extern long __machine_check_early_realmode_p8(struct pt_regs *regs); -extern long __machine_check_early_realmode_p9(struct pt_regs *regs); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_E500) extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); @@ -542,6 +539,25 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check_early = __machine_check_early_realmode_p9, .platform = "power9", }, + { /* Power10 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00800000, + .cpu_name = "POWER10 (raw)", + .cpu_features = CPU_FTRS_POWER10, + .cpu_user_features = COMMON_USER_POWER10, + .cpu_user_features2 = COMMON_USER2_POWER10, + .mmu_features = MMU_FTRS_POWER10, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .oprofile_cpu_type = "ppc64/power10", + .oprofile_type = PPC_OPROFILE_INVALID, + .cpu_setup = __setup_cpu_power10, + .cpu_restore = __restore_cpu_power10, + .machine_check_early = __machine_check_early_realmode_p10, + .platform = "power10", + }, { /* Cell Broadband Engine */ .pvr_mask = 0xffff0000, .pvr_value = 0x00700000, diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 6f8c0c6b937a17229dd0b7471f4f7ef9513cd1c0..f204ad79b6b54c55990eead08c87cb13b58bb737 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -64,10 +64,6 @@ struct dt_cpu_feature { * Set up the base CPU */ -extern long __machine_check_early_realmode_p8(struct pt_regs *regs); -extern long __machine_check_early_realmode_p9(struct pt_regs *regs); -extern long __machine_check_early_realmode_p10(struct pt_regs *regs); - static int hv_mode; static struct { @@ -657,7 +653,7 @@ static struct dt_cpu_feature_match __initdata {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL}, {"processor-utilization-of-resources-register", feat_enable_purr, 0}, {"no-execute", feat_enable, 0}, - /* strong-access-ordering is unused */ + {"strong-access-ordering", feat_enable, CPU_FTR_SAO}, {"cache-inhibited-large-page", feat_enable_large_ci, 0}, {"coprocessor-icswx", feat_enable, 0}, {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0}, diff --git 
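The pieces above — the reinstated CPU_FTR_SAO feature bit, the PROT_SAO uapi flag, the arch_validate_prot() hook, and the "strong-access-ordering" dt-cpu-ftrs entry — together restore Strong Access Ordering as an opt-in mapping attribute. A minimal userspace sketch of how a program might request it (illustrative only, error handling trimmed; PROT_SAO is defined locally in case the libc headers predate this change):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef PROT_SAO
#define PROT_SAO 0x10	/* powerpc value from asm/mman.h above */
#endif

int main(void)
{
	size_t len = 64 * 1024;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/*
	 * mprotect() goes through arch_validate_prot(); per the hunk
	 * above it fails with EINVAL when the CPU does not advertise
	 * CPU_FTR_SAO, or on LPARs without CONFIG_PPC_PROT_SAO_LPAR.
	 */
	if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_SAO))
		printf("SAO unavailable: %s\n", strerror(errno));

	munmap(p, len);
	return 0;
}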
a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 33a42e42c56f6774705a4be29f41e980772f9e71..733e40eba4ebe3edbbd3afb1e1bb2bb8ef354aa6 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -113,6 +113,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) ld r11,exception_marker@toc(r2) std r11,-16(r10) /* "regshere" marker */ +BEGIN_FTR_SECTION + HMT_MEDIUM +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) + /* * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which * would clobber syscall parameters. Also we always enter with IRQs diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 016bd831908ec191fe02d252a8d4cc298ca4b33f..73a57043ee6621c00569086177372a7987c3212a 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -548,7 +548,7 @@ void notrace restore_math(struct pt_regs *regs) * are live for the user thread). */ if ((!(msr & MSR_FP)) && should_restore_fp()) - new_msr |= MSR_FP | current->thread.fpexc_mode; + new_msr |= MSR_FP; if ((!(msr & MSR_VEC)) && should_restore_altivec()) new_msr |= MSR_VEC; @@ -559,11 +559,17 @@ void notrace restore_math(struct pt_regs *regs) } if (new_msr) { + unsigned long fpexc_mode = 0; + msr_check_and_set(new_msr); - if (new_msr & MSR_FP) + if (new_msr & MSR_FP) { do_restore_fp(); + // This also covers VSX, because VSX implies FP + fpexc_mode = current->thread.fpexc_mode; + } + if (new_msr & MSR_VEC) do_restore_altivec(); @@ -572,7 +578,7 @@ void notrace restore_math(struct pt_regs *regs) msr_check_and_clear(new_msr); - regs->msr |= new_msr; + regs->msr |= new_msr | fpexc_mode; } } #endif diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index b198b0ff25bcf2e9f10f5127aa81a97f80f04e9b..808ec9fab6052fbaadbda25f6137a05d2814d486 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -311,6 +311,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) min = pvr & 0xFF; break; case 0x004e: /* POWER9 bits 12-15 give chip type */ + case 0x0080: /* POWER10 bit 12 gives SMT8/4 */ maj = (pvr >> 8) & 0x0F; min = pvr & 0xFF; break; diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 41fedec69ac350fc6c7a11a4b90722190c8f5c3a..49db50d1db04cde639622186f7d505de7bed1108 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -834,7 +834,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change); } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); } diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index d6c1069e9954aa280b10237eccd749de5783fc54..ed0c9c43d0cf14cc8a4fd57d72cd74b36ff644ed 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) return 0; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { /* kvm_unmap_hva flushes everything anyways */ kvm_unmap_hva(kvm, start); diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index c0162911f6cbdce7b3677795dd8176932669855f..d426eaf76bb048e70c99a3487e65339ae33bf473 100644 --- 
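The restore_math() hunk above fixes a subtle MSR bug: the thread's FP exception mode bits (MSR_FE0/MSR_FE1) used to be OR'd into the mask passed to msr_check_and_set() whenever an FP restore was merely considered; now they are folded into regs->msr only when the FP state is actually restored. Those bits originate from the prctl() interface — a small sketch of the userspace side, for context (not part of the patch):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/*
	 * Ask for precise floating-point exceptions. On powerpc the
	 * kernel encodes this as MSR_FE0|MSR_FE1 in thread.fpexc_mode;
	 * after the fix above those bits reach the saved MSR only
	 * together with MSR_FP (which also covers VSX, as VSX implies FP).
	 */
	if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE))
		perror("PR_SET_FPEXC");

	return 0;
}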
a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -191,10 +191,17 @@ static bool is_module_segment(unsigned long addr) { if (!IS_ENABLED(CONFIG_MODULES)) return false; +#ifdef MODULES_VADDR + if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M)) + return false; + if (addr > ALIGN(MODULES_END, SZ_256M) - 1) + return false; +#else if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M)) return false; - if (addr >= ALIGN(VMALLOC_END, SZ_256M)) + if (addr > ALIGN(VMALLOC_END, SZ_256M) - 1) return false; +#endif return true; } diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 1da9dbba92171b9afc9fffb5f9e1ce67d96c7e71..c663e7ba801fce6969ea717958b7170c941935e8 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -232,6 +232,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags) rflags |= HPTE_R_I; else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT) rflags |= (HPTE_R_I | HPTE_R_G); + else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO) + rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M); else /* * Add memory coherence if cache inhibited is not set @@ -1115,8 +1117,10 @@ void hash__early_init_mmu_secondary(void) && cpu_has_feature(CPU_FTR_HVMODE)) tlbiel_all(); - if (IS_ENABLED(CONFIG_PPC_MEM_KEYS) && mmu_has_feature(MMU_FTR_PKEY)) +#ifdef CONFIG_PPC_MEM_KEYS + if (mmu_has_feature(MMU_FTR_PKEY)) mtspr(SPRN_UAMOR, default_uamor); +#endif } #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 16d09b36fe0638899308b7d4704567dcee4b0968..78d61f97371e1b70ecdb6bf19b38f95e53a22707 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -475,7 +475,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: true_cond = COND_NE; - /* Fall through */ + fallthrough; cond_branch: /* same targets, can avoid doing the test :) */ if (filter[i].jt == filter[i].jf) { diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 78fe349865942719cea9a1359459dde0293b7703..08643cba1494822f7585906c3b3b39aab4cb1a90 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -1557,9 +1557,16 @@ static int power_pmu_add(struct perf_event *event, int ef_flags) ret = 0; out: if (has_branch_stack(event)) { - power_pmu_bhrb_enable(event); - cpuhw->bhrb_filter = ppmu->bhrb_filter_map( - event->attr.branch_sample_type); + u64 bhrb_filter = -1; + + if (ppmu->bhrb_filter_map) + bhrb_filter = ppmu->bhrb_filter_map( + event->attr.branch_sample_type); + + if (bhrb_filter != -1) { + cpuhw->bhrb_filter = bhrb_filter; + power_pmu_bhrb_enable(event); + } } perf_pmu_enable(event->pmu); @@ -1881,7 +1888,6 @@ static int power_pmu_event_init(struct perf_event *event) int n; int err; struct cpu_hw_events *cpuhw; - u64 bhrb_filter; if (!ppmu) return -ENOENT; @@ -1987,7 +1993,10 @@ static int power_pmu_event_init(struct perf_event *event) err = power_check_constraints(cpuhw, events, cflags, n + 1); if (has_branch_stack(event)) { - bhrb_filter = ppmu->bhrb_filter_map( + u64 bhrb_filter = -1; + + if (ppmu->bhrb_filter_map) + bhrb_filter = ppmu->bhrb_filter_map( event->attr.branch_sample_type); if (bhrb_filter == -1) { @@ -2141,6 +2150,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (perf_event_overflow(event, &data, regs)) power_pmu_stop(event, 0); + } else if (period) { + /* Account for interrupt 
in case of invalid SIAR */ + if (perf_event_account_interrupt(event)) + power_pmu_stop(event, 0); } } @@ -2323,6 +2336,7 @@ int register_power_pmu(struct power_pmu *pmu) pmu->name); power_pmu.attr_groups = ppmu->attr_groups; + power_pmu.capabilities |= (ppmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS); #ifdef MSR_HV /* diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index cdb7bfbd157e0e2f80af9519f9c3a284449eea91..6e7e820508df7c071f7ef38ecf2eba6c461a2944 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -1128,6 +1128,15 @@ static struct bin_attribute *if_bin_attrs[] = { NULL, }; +static struct attribute *cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group cpumask_attr_group = { + .attrs = cpumask_attrs, +}; + static struct attribute *if_attrs[] = { &dev_attr_catalog_len.attr, &dev_attr_catalog_version.attr, @@ -1135,7 +1144,6 @@ static struct attribute *if_attrs[] = { &dev_attr_sockets.attr, &dev_attr_chipspersocket.attr, &dev_attr_coresperchip.attr, - &dev_attr_cpumask.attr, NULL, }; @@ -1151,6 +1159,7 @@ static const struct attribute_group *attr_groups[] = { &event_desc_group, &event_long_desc_group, &if_group, + &cpumask_attr_group, NULL, }; diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index a45d694a5d5d88814ec13bb2d76ec12cc6e4af82..62d0b54086f884c1f8a43023988363a18373953f 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -1289,7 +1289,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem, header->misc = 0; if (cpu_has_feature(CPU_FTR_ARCH_31)) { - switch (IMC_TRACE_RECORD_VAL_HVPR(mem->val)) { + switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) { case 0:/* when MSR HV and PR not set in the trace-record */ header->misc |= PERF_RECORD_MISC_GUEST_KERNEL; break; @@ -1297,7 +1297,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem, header->misc |= PERF_RECORD_MISC_GUEST_USER; break; case 2: /* MSR HV is 1 and PR is 0 */ - header->misc |= PERF_RECORD_MISC_HYPERVISOR; + header->misc |= PERF_RECORD_MISC_KERNEL; break; case 3: /* MSR HV is 1 and PR is 1 */ header->misc |= PERF_RECORD_MISC_USER; diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index a213a0aa5d25cfa3ea5f7a2034f89d8e5652f7c3..8e53f2fc3fe07f3e5b782f067094e50bb94fa33d 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c @@ -13,9 +13,11 @@ #include #include +u64 PERF_REG_EXTENDED_MASK; + #define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r) -#define REG_RESERVED (~((1ULL << PERF_REG_POWERPC_MAX) - 1)) +#define REG_RESERVED (~(PERF_REG_EXTENDED_MASK | PERF_REG_PMU_MASK)) static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = { PT_REGS_OFFSET(PERF_REG_POWERPC_R0, gpr[0]), @@ -69,10 +71,36 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = { PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr), }; +/* Function to return the extended register values */ +static u64 get_ext_regs_value(int idx) +{ + switch (idx) { + case PERF_REG_POWERPC_MMCR0: + return mfspr(SPRN_MMCR0); + case PERF_REG_POWERPC_MMCR1: + return mfspr(SPRN_MMCR1); + case PERF_REG_POWERPC_MMCR2: + return mfspr(SPRN_MMCR2); +#ifdef CONFIG_PPC64 + case PERF_REG_POWERPC_MMCR3: + return mfspr(SPRN_MMCR3); + case PERF_REG_POWERPC_SIER2: + return mfspr(SPRN_SIER2); + case PERF_REG_POWERPC_SIER3: + return mfspr(SPRN_SIER3); +#endif + default: return 0; + } +} + u64 perf_reg_value(struct pt_regs *regs, int idx) { - if 
(WARN_ON_ONCE(idx >= PERF_REG_POWERPC_MAX)) - return 0; + u64 perf_reg_extended_max = PERF_REG_POWERPC_MAX; + + if (cpu_has_feature(CPU_FTR_ARCH_31)) + perf_reg_extended_max = PERF_REG_MAX_ISA_31; + else if (cpu_has_feature(CPU_FTR_ARCH_300)) + perf_reg_extended_max = PERF_REG_MAX_ISA_300; if (idx == PERF_REG_POWERPC_SIER && (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) || @@ -85,6 +113,16 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) IS_ENABLED(CONFIG_PPC32))) return 0; + if (idx >= PERF_REG_POWERPC_MAX && idx < perf_reg_extended_max) + return get_ext_regs_value(idx); + + /* + * If idx refers to a value beyond the + * supported registers, return 0 with a warning + */ + if (WARN_ON_ONCE(idx >= perf_reg_extended_max)) + return 0; + return regs_get_register(regs, pt_regs_offset[idx]); } diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c index f7cff7f36a1cd4af4dff9a1f5e63043898d5722e..83148656b5241e7b3956eb685fe4bf30d7401519 100644 --- a/arch/powerpc/perf/power10-pmu.c +++ b/arch/powerpc/perf/power10-pmu.c @@ -87,6 +87,8 @@ #define POWER10_MMCRA_IFM3 0x00000000C0000000UL #define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL +extern u64 PERF_REG_EXTENDED_MASK; + /* Table of alternatives, sorted by column 0 */ static const unsigned int power10_event_alternatives[][MAX_ALT] = { { PM_RUN_CYC_ALT, PM_RUN_CYC }, @@ -397,6 +399,7 @@ static struct power_pmu power10_pmu = { .cache_events = &power10_cache_events, .attr_groups = power10_pmu_attr_groups, .bhrb_nr = 32, + .capabilities = PERF_PMU_CAP_EXTENDED_REGS, }; int init_power10_pmu(void) @@ -408,6 +411,9 @@ int init_power10_pmu(void) strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power10")) return -ENODEV; + /* Set the PERF_REG_EXTENDED_MASK here */ + PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31; + rc = register_power_pmu(&power10_pmu); if (rc) return rc; diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 05dae38b969a5fef609d83c91d23a84040dc3439..2a57e93a79dcf703b920f1b556d46d60308507e4 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -90,6 +90,8 @@ enum { #define POWER9_MMCRA_IFM3 0x00000000C0000000UL #define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL +extern u64 PERF_REG_EXTENDED_MASK; + /* Nasty Power9 specific hack */ #define PVR_POWER9_CUMULUS 0x00002000 @@ -434,6 +436,7 @@ static struct power_pmu power9_pmu = { .cache_events = &power9_cache_events, .attr_groups = power9_pmu_attr_groups, .bhrb_nr = 32, + .capabilities = PERF_PMU_CAP_EXTENDED_REGS, }; int init_power9_pmu(void) @@ -457,6 +460,9 @@ int init_power9_pmu(void) } } + /* Set the PERF_REG_EXTENDED_MASK here */ + PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_300; + rc = register_power_pmu(&power9_pmu); if (rc) return rc; diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 87737ec86d39aa3b9236bb5b27408f081d82e4d9..1dc9d3c818726b5e78a64c9b04efdb136bcb43d2 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -36,7 +36,7 @@ config PPC_BOOK3S_6xx select PPC_HAVE_PMU_SUPPORT select PPC_HAVE_KUEP select PPC_HAVE_KUAP - select HAVE_ARCH_VMAP_STACK + select HAVE_ARCH_VMAP_STACK if !ADB_PMU config PPC_BOOK3S_601 bool "PowerPC 601" diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 77513a80cef902ed4cb61f69118007691e8b9648..345ab062b21af7cbd23f0f1aaffb5329043f7ac8 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@
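With PERF_PMU_CAP_EXTENDED_REGS advertised and PERF_REG_EXTENDED_MASK set per ISA level, userspace can request the extended registers in interrupt samples. A minimal sketch using the uapi masks defined above — it assumes patched headers and an ISA v3.1 (POWER10) PMU; on older PMUs or kernels the open fails:

#include <asm/perf_regs.h>
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sample cycles, capturing GPRs plus the ISA v3.1 extended PMU regs. */
static long open_cycles_with_ext_regs(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_REGS_INTR;
	attr.sample_regs_intr = PERF_REG_PMU_MASK | PERF_REG_PMU_MASK_31;

	/*
	 * Bits outside REG_RESERVED pass validation; PMUs that did not
	 * set PERF_PMU_CAP_EXTENDED_REGS reject the extended bits.
	 */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}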
-1223,7 +1223,7 @@ static void __init pnv_probe_idle_states(void) return; } - if (pvr_version_is(PVR_POWER9)) + if (cpu_has_feature(CPU_FTR_ARCH_300)) pnv_power9_idle_init(); for (i = 0; i < nr_pnv_idle_states; i++) diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index c9c25fb0783c4c79f8b36c89cceda0d1d0982b86..023a4f987bb22d5c25955ac93dd2275593cbf0f9 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2705,7 +2705,7 @@ void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) struct iommu_table *tbl = pe->table_group.tables[0]; int64_t rc; - if (pe->dma_setup_done) + if (!pe->dma_setup_done) return; rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0); diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index c6e0d8abf75ea917df2b3b9626f32d33449d634b..7a974ed6b2400ec81235338ec9873654a6ded548 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -107,22 +107,28 @@ static int pseries_cpu_disable(void) */ static void pseries_cpu_die(unsigned int cpu) { - int tries; int cpu_status = 1; unsigned int pcpu = get_hard_smp_processor_id(cpu); + unsigned long timeout = jiffies + msecs_to_jiffies(120000); - for (tries = 0; tries < 25; tries++) { + while (true) { cpu_status = smp_query_cpu_stopped(pcpu); if (cpu_status == QCSS_STOPPED || cpu_status == QCSS_HARDWARE_ERROR) break; - cpu_relax(); + if (time_after(jiffies, timeout)) { + pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n", + cpu, pcpu); + timeout = jiffies + msecs_to_jiffies(120000); + } + + cond_resched(); } - if (cpu_status != 0) { - printk("Querying DEAD? cpu %i (%i) shows %i\n", - cpu, pcpu, cpu_status); + if (cpu_status == QCSS_HARDWARE_ERROR) { + pr_warn("CPU %i (hwid %i) reported error while dying\n", + cpu, pcpu); } /* Isolation and deallocation are definitely done by diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index f3736fcd98fcb8de97a514dbf786e195f87a8b40..13c86a292c6d70c2dff08bcbf947e7370a55cb81 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -184,7 +184,6 @@ static void handle_system_shutdown(char event_modifier) case EPOW_SHUTDOWN_ON_UPS: pr_emerg("Loss of system power detected. System is running on" " UPS/battery. 
Check RTAS error log for details\n"); - orderly_poweroff(true); break; case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS: diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 7b59055291466488f3e6bd613c0196e9b77d3b92..df18372861d8d22e3c0f6b8485c6128c65de733b 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -81,7 +81,7 @@ config RISCV select PCI_DOMAINS_GENERIC if PCI select PCI_MSI if PCI select RISCV_INTC - select RISCV_TIMER + select RISCV_TIMER if RISCV_SBI select SPARSEMEM_STATIC if 32BIT select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs index 6c88148f1b9b3ff6c02e5b6a6e02b25be26f7f5f..8a55f6156661d450c81a81e9ac22895989e19531 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -12,6 +12,7 @@ config SOC_SIFIVE config SOC_VIRT bool "QEMU Virt Machine" + select CLINT_TIMER if RISCV_M_MODE select POWER_RESET select POWER_RESET_SYSCON select POWER_RESET_SYSCON_POWEROFF @@ -24,6 +25,7 @@ config SOC_VIRT config SOC_KENDRYTE bool "Kendryte K210 SoC" depends on !MMU + select CLINT_TIMER if RISCV_M_MODE select SERIAL_SIFIVE if TTY select SERIAL_SIFIVE_CONSOLE if TTY select SIFIVE_PLIC diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig index f27596e9663e325e645c01454bfd0228dacfa0c8..e046a0babde43711b8ab4d7faba64c3b2444b817 100644 --- a/arch/riscv/configs/nommu_virt_defconfig +++ b/arch/riscv/configs/nommu_virt_defconfig @@ -26,6 +26,7 @@ CONFIG_EXPERT=y CONFIG_SLOB=y # CONFIG_SLAB_MERGE_DEFAULT is not set # CONFIG_MMU is not set +CONFIG_SOC_VIRT=y CONFIG_MAXPHYSMEM_2GB=y CONFIG_SMP=y CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0" @@ -49,7 +50,6 @@ CONFIG_VIRTIO_BLK=y # CONFIG_SERIO is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_LDISC_AUTOLOAD is not set -# CONFIG_DEVMEM is not set CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y @@ -57,16 +57,13 @@ CONFIG_SERIAL_8250_NR_UARTS=1 CONFIG_SERIAL_8250_RUNTIME_UARTS=1 CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set +# CONFIG_DEVMEM is not set # CONFIG_HWMON is not set -# CONFIG_LCD_CLASS_DEVICE is not set -# CONFIG_BACKLIGHT_CLASS_DEVICE is not set # CONFIG_VGA_CONSOLE is not set # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set CONFIG_VIRTIO_MMIO=y CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y -CONFIG_SIFIVE_PLIC=y -# CONFIG_VALIDATE_FS_PARSER is not set CONFIG_EXT2_FS=y # CONFIG_DNOTIFY is not set # CONFIG_INOTIFY_USER is not set diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig index 3a55f0e00d6c05aad8872faee20c4dc9d4f750df..2c2cda6cc1c5352ef794e0db7d941c72f6229383 100644 --- a/arch/riscv/configs/rv32_defconfig +++ b/arch/riscv/configs/rv32_defconfig @@ -14,6 +14,7 @@ CONFIG_CHECKPOINT_RESTORE=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_BPF_SYSCALL=y +CONFIG_SOC_SIFIVE=y CONFIG_SOC_VIRT=y CONFIG_ARCH_RV32I=y CONFIG_SMP=y @@ -62,6 +63,8 @@ CONFIG_HVC_RISCV_SBI=y CONFIG_VIRTIO_CONSOLE=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_VIRTIO=y +CONFIG_SPI=y +CONFIG_SPI_SIFIVE=y # CONFIG_PTP_1588_CLOCK is not set CONFIG_POWER_RESET=y CONFIG_DRM=y @@ -77,6 +80,8 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y CONFIG_USB_UAS=y +CONFIG_MMC=y +CONFIG_MMC_SPI=y CONFIG_RTC_CLASS=y CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=y diff --git a/arch/riscv/include/asm/clint.h b/arch/riscv/include/asm/clint.h deleted file mode 100644 index 
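The pseries_cpu_die() rewrite above replaces a bounded 25-iteration spin with an indefinite wait that stays schedulable and warns every two minutes. Distilled into a hypothetical helper — QCSS_* and smp_query_cpu_stopped() are from the hunk, the helper itself is illustrative:

/* Illustrative only: the wait-with-periodic-warning pattern above. */
static void wait_until_cpu_stopped(unsigned int cpu, unsigned int pcpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(120000);
	int status;

	while (true) {
		status = smp_query_cpu_stopped(pcpu);
		if (status == QCSS_STOPPED || status == QCSS_HARDWARE_ERROR)
			break;

		if (time_after(jiffies, timeout)) {
			pr_warn("CPU %u (hwid %u) still not stopped\n",
				cpu, pcpu);
			/* keep waiting, but re-arm the warning window */
			timeout = jiffies + msecs_to_jiffies(120000);
		}

		/* sleepable context: yield rather than cpu_relax() */
		cond_resched();
	}
}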
a279b17a6aade915d0f330247a02387515837da0..0000000000000000000000000000000000000000 --- a/arch/riscv/include/asm/clint.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_RISCV_CLINT_H -#define _ASM_RISCV_CLINT_H 1 - -#include -#include - -#ifdef CONFIG_RISCV_M_MODE -extern u32 __iomem *clint_ipi_base; - -void clint_init_boot_cpu(void); - -static inline void clint_send_ipi_single(unsigned long hartid) -{ - writel(1, clint_ipi_base + hartid); -} - -static inline void clint_send_ipi_mask(const struct cpumask *mask) -{ - int cpu; - - for_each_cpu(cpu, mask) - clint_send_ipi_single(cpuid_to_hartid_map(cpu)); -} - -static inline void clint_clear_ipi(unsigned long hartid) -{ - writel(0, clint_ipi_base + hartid); -} -#else /* CONFIG_RISCV_M_MODE */ -#define clint_init_boot_cpu() do { } while (0) - -/* stubs to for code is only reachable under IS_ENABLED(CONFIG_RISCV_M_MODE): */ -void clint_send_ipi_single(unsigned long hartid); -void clint_send_ipi_mask(const struct cpumask *hartid_mask); -void clint_clear_ipi(unsigned long hartid); -#endif /* CONFIG_RISCV_M_MODE */ - -#endif /* _ASM_RISCV_CLINT_H */ diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h index 6dfd2a1446d56c53041a8c34db003683a3d680b5..df1f7c4cd4337be6fff336642d152eb0e6261f09 100644 --- a/arch/riscv/include/asm/smp.h +++ b/arch/riscv/include/asm/smp.h @@ -15,6 +15,11 @@ struct seq_file; extern unsigned long boot_cpu_hartid; +struct riscv_ipi_ops { + void (*ipi_inject)(const struct cpumask *target); + void (*ipi_clear)(void); +}; + #ifdef CONFIG_SMP /* * Mapping between linux logical cpu index and hartid. @@ -40,6 +45,12 @@ void arch_send_call_function_single_ipi(int cpu); int riscv_hartid_to_cpuid(int hartid); void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out); +/* Set custom IPI operations */ +void riscv_set_ipi_ops(struct riscv_ipi_ops *ops); + +/* Clear IPI for current CPU */ +void riscv_clear_ipi(void); + /* Secondary hart entry */ asmlinkage void smp_callin(void); @@ -81,6 +92,14 @@ static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in, cpumask_set_cpu(boot_cpu_hartid, out); } +static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops) +{ +} + +static inline void riscv_clear_ipi(void) +{ +} + #endif /* CONFIG_SMP */ #if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP) diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h index bad2a7c2cda52ee352daa2d954389429bdf3ec28..a3fb85d505d44fada43cc15625b2f007783e9025 100644 --- a/arch/riscv/include/asm/timex.h +++ b/arch/riscv/include/asm/timex.h @@ -7,41 +7,27 @@ #define _ASM_RISCV_TIMEX_H #include -#include typedef unsigned long cycles_t; -extern u64 __iomem *riscv_time_val; -extern u64 __iomem *riscv_time_cmp; - -#ifdef CONFIG_64BIT -#define mmio_get_cycles() readq_relaxed(riscv_time_val) -#else -#define mmio_get_cycles() readl_relaxed(riscv_time_val) -#define mmio_get_cycles_hi() readl_relaxed(((u32 *)riscv_time_val) + 1) -#endif - static inline cycles_t get_cycles(void) { - if (IS_ENABLED(CONFIG_RISCV_SBI)) - return csr_read(CSR_TIME); - return mmio_get_cycles(); + return csr_read(CSR_TIME); } #define get_cycles get_cycles +static inline u32 get_cycles_hi(void) +{ + return csr_read(CSR_TIMEH); +} +#define get_cycles_hi get_cycles_hi + #ifdef CONFIG_64BIT static inline u64 get_cycles64(void) { return get_cycles(); } #else /* CONFIG_64BIT */ -static inline u32 get_cycles_hi(void) -{ - if (IS_ENABLED(CONFIG_RISCV_SBI)) - return csr_read(CSR_TIMEH); - return 
mmio_get_cycles_hi(); -} - static inline u64 get_cycles64(void) { u32 hi, lo; diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index a5287ab9f7f25f1d4a565d3acf6b40a1991304ce..dc93710f0b2f10ba478a96cf97cb5e082babef50 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -31,7 +31,7 @@ obj-y += cacheinfo.o obj-y += patch.o obj-$(CONFIG_MMU) += vdso.o vdso/ -obj-$(CONFIG_RISCV_M_MODE) += clint.o traps_misaligned.o +obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o obj-$(CONFIG_FPU) += fpu.o obj-$(CONFIG_SMP) += smpboot.o obj-$(CONFIG_SMP) += smp.o diff --git a/arch/riscv/kernel/clint.c b/arch/riscv/kernel/clint.c deleted file mode 100644 index 3647980d14c3c9c853c3541b0907a951c8ef4716..0000000000000000000000000000000000000000 --- a/arch/riscv/kernel/clint.c +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2019 Christoph Hellwig. - */ - -#include -#include -#include -#include -#include -#include -#include - -/* - * This is the layout used by the SiFive clint, which is also shared by the qemu - * virt platform, and the Kendryte KD210 at least. - */ -#define CLINT_IPI_OFF 0 -#define CLINT_TIME_CMP_OFF 0x4000 -#define CLINT_TIME_VAL_OFF 0xbff8 - -u32 __iomem *clint_ipi_base; - -void clint_init_boot_cpu(void) -{ - struct device_node *np; - void __iomem *base; - - np = of_find_compatible_node(NULL, NULL, "riscv,clint0"); - if (!np) { - panic("clint not found"); - return; - } - - base = of_iomap(np, 0); - if (!base) - panic("could not map CLINT"); - - clint_ipi_base = base + CLINT_IPI_OFF; - riscv_time_cmp = base + CLINT_TIME_CMP_OFF; - riscv_time_val = base + CLINT_TIME_VAL_OFF; - - clint_clear_ipi(boot_cpu_hartid); -} diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index f383ef5672b20f1a43c90a33aa06d955f34d5482..226ccce0f9e0734a08ff3da652c898ed21e378ce 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -547,6 +547,18 @@ static inline long sbi_get_firmware_version(void) return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION); } +static void sbi_send_cpumask_ipi(const struct cpumask *target) +{ + struct cpumask hartid_mask; + + riscv_cpuid_to_hartid_mask(target, &hartid_mask); + + sbi_send_ipi(cpumask_bits(&hartid_mask)); +} + +static struct riscv_ipi_ops sbi_ipi_ops = { + .ipi_inject = sbi_send_cpumask_ipi +}; int __init sbi_init(void) { @@ -587,5 +599,7 @@ int __init sbi_init(void) __sbi_rfence = __sbi_rfence_v01; } + riscv_set_ipi_ops(&sbi_ipi_ops); + return 0; } diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index f04373be54a6b93a610009b1f1ce57bb951cbb92..2c6dd329312bd282a830685cc57331a90944889c 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -18,7 +18,6 @@ #include #include -#include #include #include #include @@ -79,7 +78,6 @@ void __init setup_arch(char **cmdline_p) #else unflatten_device_tree(); #endif - clint_init_boot_cpu(); #ifdef CONFIG_SWIOTLB swiotlb_init(1); diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 17ba190e84a53df83a3b9a6c263724de3d757b12..e996e08f1061e5e46bd38acd01623ae8ee87e532 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -250,7 +250,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->a0 = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: regs->a0 = regs->orig_a0; regs->epc -= 0x4; diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index 
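The deleted CLINT helpers are replaced by the riscv_ipi_ops indirection from asm/smp.h, with the SBI backend registering itself in sbi_init() as shown above. A hypothetical MMIO backend — the names, doorbell layout, and init hook here are illustrative, not part of this patch — would plug in the same way:

#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <asm/smp.h>

/* Hypothetical M-mode IPI backend: one 32-bit doorbell per hart. */
static u32 __iomem *ipi_doorbell;

static void mmio_ipi_inject(const struct cpumask *target)
{
	int cpu;

	for_each_cpu(cpu, target)
		writel(1, ipi_doorbell + cpuid_to_hartid_map(cpu));
}

static void mmio_ipi_clear(void)
{
	writel(0, ipi_doorbell + cpuid_to_hartid_map(smp_processor_id()));
}

static struct riscv_ipi_ops mmio_ipi_ops = {
	.ipi_inject = mmio_ipi_inject,
	.ipi_clear = mmio_ipi_clear,
};

/* Called from the driver's init path, after mapping the MMIO region. */
static void mmio_ipi_init(void __iomem *base)
{
	ipi_doorbell = (u32 __iomem *)base;
	riscv_set_ipi_ops(&mmio_ipi_ops);
}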
554b0fb470608394187dd54ac4f442949b5a6f06..ea028d9e0d2424e8f58148db4bdb6700acfa7bef 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -18,7 +18,6 @@ #include #include -#include #include #include #include @@ -86,9 +85,25 @@ static void ipi_stop(void) wait_for_interrupt(); } +static struct riscv_ipi_ops *ipi_ops; + +void riscv_set_ipi_ops(struct riscv_ipi_ops *ops) +{ + ipi_ops = ops; +} +EXPORT_SYMBOL_GPL(riscv_set_ipi_ops); + +void riscv_clear_ipi(void) +{ + if (ipi_ops && ipi_ops->ipi_clear) + ipi_ops->ipi_clear(); + + csr_clear(CSR_IP, IE_SIE); +} +EXPORT_SYMBOL_GPL(riscv_clear_ipi); + static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op) { - struct cpumask hartid_mask; int cpu; smp_mb__before_atomic(); @@ -96,33 +111,22 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op) set_bit(op, &ipi_data[cpu].bits); smp_mb__after_atomic(); - riscv_cpuid_to_hartid_mask(mask, &hartid_mask); - if (IS_ENABLED(CONFIG_RISCV_SBI)) - sbi_send_ipi(cpumask_bits(&hartid_mask)); + if (ipi_ops && ipi_ops->ipi_inject) + ipi_ops->ipi_inject(mask); else - clint_send_ipi_mask(mask); + pr_warn("SMP: IPI inject method not available\n"); } static void send_ipi_single(int cpu, enum ipi_message_type op) { - int hartid = cpuid_to_hartid_map(cpu); - smp_mb__before_atomic(); set_bit(op, &ipi_data[cpu].bits); smp_mb__after_atomic(); - if (IS_ENABLED(CONFIG_RISCV_SBI)) - sbi_send_ipi(cpumask_bits(cpumask_of(hartid))); - else - clint_send_ipi_single(hartid); -} - -static inline void clear_ipi(void) -{ - if (IS_ENABLED(CONFIG_RISCV_SBI)) - csr_clear(CSR_IP, IE_SIE); + if (ipi_ops && ipi_ops->ipi_inject) + ipi_ops->ipi_inject(cpumask_of(cpu)); else - clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id())); + pr_warn("SMP: IPI inject method not available\n"); } #ifdef CONFIG_IRQ_WORK @@ -140,7 +144,7 @@ void handle_IPI(struct pt_regs *regs) irq_enter(); - clear_ipi(); + riscv_clear_ipi(); while (true) { unsigned long ops; diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 356825a57551c1cb7e97cf89ea80db1be129eb95..96167d55ed9845a23debad56fcbeb009c600c7d1 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -147,8 +146,7 @@ asmlinkage __visible void smp_callin(void) struct mm_struct *mm = &init_mm; unsigned int curr_cpuid = smp_processor_id(); - if (!IS_ENABLED(CONFIG_RISCV_SBI)) - clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id())); + riscv_clear_ipi(); /* All kernel threads share the same mm context. */ mmgrab(mm); diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c index bc5f2204693fccc13c103ef0fc1369aaeb784323..579575f9cdae0873057ceb878fc6e9dce46e21c8 100644 --- a/arch/riscv/net/bpf_jit_comp32.c +++ b/arch/riscv/net/bpf_jit_comp32.c @@ -1020,7 +1020,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, emit_zext64(dst, ctx); break; } - /* Fallthrough. */ + fallthrough; case BPF_ALU | BPF_ADD | BPF_X: case BPF_ALU | BPF_SUB | BPF_X: @@ -1079,7 +1079,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case 16: emit(rv_slli(lo(rd), lo(rd), 16), ctx); emit(rv_srli(lo(rd), lo(rd), 16), ctx); - /* Fallthrough. 
*/ + fallthrough; case 32: if (!ctx->prog->aux->verifier_zext) emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 3d86e12e8e3c21ed6a23395f6d946f95411000f0..b29fcc66ec39b06e5baba74cf3d4fdb8bcbdf42b 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -30,7 +30,7 @@ config GENERIC_BUG_RELATIVE_POINTERS def_bool y config GENERIC_LOCKBREAK - def_bool y if PREEMPTTION + def_bool y if PREEMPTION config PGSTE def_bool y if KVM diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 0cf9a82326a85b4f254f46bd0648096621577831..7228aabe9da615f716e9b9fa32a3be19dd899023 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -626,6 +626,7 @@ CONFIG_NTFS_RW=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_INODE64=y CONFIG_HUGETLBFS=y CONFIG_CONFIGFS_FS=m CONFIG_ECRYPT_FS=m @@ -807,6 +808,7 @@ CONFIG_DEBUG_NOTIFIERS=y CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_DEBUG_CREDENTIALS=y CONFIG_RCU_TORTURE_TEST=m +CONFIG_RCU_REF_SCALE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=300 # CONFIG_RCU_TRACE is not set CONFIG_LATENCYTOP=y @@ -818,6 +820,7 @@ CONFIG_PREEMPT_TRACER=y CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_BPF_KPROBE_OVERRIDE=y CONFIG_HIST_TRIGGERS=y CONFIG_S390_PTDUMP=y CONFIG_NOTIFIER_ERROR_INJECTION=m @@ -829,6 +832,7 @@ CONFIG_FAIL_MAKE_REQUEST=y CONFIG_FAIL_IO_TIMEOUT=y CONFIG_FAIL_FUTEX=y CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAIL_FUNCTION=y CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y CONFIG_LKDTM=m CONFIG_TEST_LIST_SORT=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 5df9759e8ff6700e6771f9999604b3192e545568..fab03b7a69324bd99085c452e5981f123d56f364 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -617,6 +617,7 @@ CONFIG_NTFS_RW=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_INODE64=y CONFIG_HUGETLBFS=y CONFIG_CONFIGFS_FS=m CONFIG_ECRYPT_FS=m @@ -763,6 +764,7 @@ CONFIG_PANIC_ON_OOPS=y CONFIG_TEST_LOCKUP=m CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_RCU_TORTURE_TEST=m +CONFIG_RCU_REF_SCALE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_LATENCYTOP=y CONFIG_BOOTTIME_TRACING=y @@ -771,6 +773,7 @@ CONFIG_STACK_TRACER=y CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_BPF_KPROBE_OVERRIDE=y CONFIG_HIST_TRIGGERS=y CONFIG_S390_PTDUMP=y CONFIG_LKDTM=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 4091c50449cd06238578d5b640bb364269c5a293..8f67c55625f9eb0f744ebc529361f6a97d80a4dd 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -74,5 +74,6 @@ CONFIG_DEBUG_KERNEL=y CONFIG_PANIC_ON_OOPS=y # CONFIG_SCHED_DEBUG is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set # CONFIG_FTRACE is not set # CONFIG_RUNTIME_TESTING_MENU is not set diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 50b4ce8cddfdc4c2f0940e932169458ea4b2141c..918f0ba4f4d20f9934929639dcd4190b0b0f5a1e 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -29,7 +29,7 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ old__, new__, prev__; \ pcp_op_T__ *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ prev__ = *ptr__; \ do { \ @@ -37,7 +37,7 @@ new__ = old__ op (val); \ prev__ = cmpxchg(ptr__, old__, new__); \ } while (prev__ != old__); \ - 
preempt_enable(); \ + preempt_enable_notrace(); \ new__; \ }) @@ -68,7 +68,7 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ if (__builtin_constant_p(val__) && \ ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ @@ -84,7 +84,7 @@ : [val__] "d" (val__) \ : "cc"); \ } \ - preempt_enable(); \ + preempt_enable_notrace(); \ } #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int) @@ -95,14 +95,14 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ op " %[old__],%[val__],%[ptr__]\n" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ - preempt_enable(); \ + preempt_enable_notrace(); \ old__ + val__; \ }) @@ -114,14 +114,14 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ op " %[old__],%[val__],%[ptr__]\n" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ - preempt_enable(); \ + preempt_enable_notrace(); \ } #define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan") @@ -136,10 +136,10 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ ret__; \ pcp_op_T__ *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ ret__ = cmpxchg(ptr__, oval, nval); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) @@ -152,10 +152,10 @@ ({ \ typeof(pcp) *ptr__; \ typeof(pcp) ret__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ ret__ = xchg(ptr__, nval); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) @@ -171,11 +171,11 @@ typeof(pcp1) *p1__; \ typeof(pcp2) *p2__; \ int ret__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ p1__ = raw_cpu_ptr(&(pcp1)); \ p2__ = raw_cpu_ptr(&(pcp2)); \ ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index 88bb42ca50084769c155acdcb3c1876fac07a287..c73f50649e7e41092c741635f5eed5f52afbd9e5 100644 --- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c @@ -33,14 +33,13 @@ void enabled_wait(void) PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; clear_cpu_flag(CIF_NOHZ_DELAY); - trace_cpu_idle_rcuidle(1, smp_processor_id()); local_irq_save(flags); /* Call the assembler magic in entry.S */ psw_idle(idle, psw_mask); local_irq_restore(flags); - trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); /* Account time spent with enabled wait psw loaded as idle time. 
*/ + /* XXX seqcount has tracepoints that require RCU */ write_seqcount_begin(&idle->seqcount); idle_time = idle->clock_idle_exit - idle->clock_idle_enter; idle->clock_idle_enter = idle->clock_idle_exit = 0ULL; diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 11d2f7d05f9191afd9510bc8b078ac6bf95caac2..a76dd27fb2e819330d384021d2eab8ba6a5a6836 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -1268,7 +1268,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb) cb->pc == 1 && cb->qc == 0 && cb->reserved2 == 0 && - cb->key == PAGE_DEFAULT_KEY && cb->reserved3 == 0 && cb->reserved4 == 0 && cb->reserved5 == 0 && @@ -1330,7 +1329,11 @@ static int s390_runtime_instr_set(struct task_struct *target, kfree(data); return -EINVAL; } - + /* + * Override access key in any case, since user space should + * not be able to set it, nor should it care about it. + */ + ri_cb.key = PAGE_DEFAULT_KEY >> 4; preempt_disable(); if (!target->thread.ri_cb) target->thread.ri_cb = data; diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index 125c7f6e87150d526a60c9e23b99c47bad1aae2b..1788a5454b6fc7a3a64b621b9b1880e35d6b705d 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb) cb->k = 1; cb->ps = 1; cb->pc = 1; - cb->key = PAGE_DEFAULT_KEY; + cb->key = PAGE_DEFAULT_KEY >> 4; cb->v = 1; } diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 1aed1a4dfc2d6a446f6113d72ce4074404ef2a21..eddf71c22875e3a2487095a8dab0bd0809b85b0a 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -402,6 +402,7 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end, pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY); if (!pud) goto out; + p4d_populate(&init_mm, p4d, pud); } ret = modify_pud_table(p4d, addr, next, add, direct); if (ret) diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 3902c9f6f2d63dd011f9e1c3244f062ed1d182cc..4b62d6b550246ddf20c8cd72dcabebfb6e8e2075 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -672,6 +672,19 @@ int zpci_disable_device(struct zpci_dev *zdev) } EXPORT_SYMBOL_GPL(zpci_disable_device); +void zpci_remove_device(struct zpci_dev *zdev) +{ + struct zpci_bus *zbus = zdev->zbus; + struct pci_dev *pdev; + + pdev = pci_get_slot(zbus->bus, zdev->devfn); + if (pdev) { + if (pdev->is_virtfn) + return zpci_remove_virtfn(pdev, zdev->vfn); + pci_stop_and_remove_bus_device_locked(pdev); + } +} + int zpci_create_device(struct zpci_dev *zdev) { int rc; @@ -716,13 +729,8 @@ void zpci_release_device(struct kref *kref) { struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref); - if (zdev->zbus->bus) { - struct pci_dev *pdev; - - pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); - if (pdev) - pci_stop_and_remove_bus_device_locked(pdev); - } + if (zdev->zbus->bus) + zpci_remove_device(zdev); switch (zdev->state) { case ZPCI_FN_STATE_ONLINE: diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c index 642a993846889935749280ad0b87d94c7b61841b..5967f30141563705e66067efc87c6d452e9b5fb3 100644 --- a/arch/s390/pci/pci_bus.c +++ b/arch/s390/pci/pci_bus.c @@ -132,13 +132,14 @@ static int zpci_bus_link_virtfn(struct pci_dev *pdev, { int rc; - virtfn->physfn = pci_dev_get(pdev); rc = pci_iov_sysfs_link(pdev, virtfn, vfid); - if (rc) { - pci_dev_put(pdev); - virtfn->physfn = NULL; + if (rc) return rc; - } + + virtfn->is_virtfn = 1; + virtfn->multifunction = 0; + 
virtfn->physfn = pci_dev_get(pdev); + return 0; } @@ -151,9 +152,9 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus, int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/ int rc = 0; - virtfn->is_virtfn = 1; - virtfn->multifunction = 0; - WARN_ON(vfid < 0); + if (!zbus->multifunction) + return 0; + /* If the parent PF for the given VF is also configured in the * instance, it must be on the same zbus. * We can then identify the parent PF by checking what @@ -165,11 +166,17 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus, zdev = zbus->function[i]; if (zdev && zdev->is_physfn) { pdev = pci_get_slot(zbus->bus, zdev->devfn); + if (!pdev) + continue; cand_devfn = pci_iov_virtfn_devfn(pdev, vfid); if (cand_devfn == virtfn->devfn) { rc = zpci_bus_link_virtfn(pdev, virtfn, vfid); + /* balance pci_get_slot() */ + pci_dev_put(pdev); break; } + /* balance pci_get_slot() */ + pci_dev_put(pdev); } } return rc; @@ -178,12 +185,23 @@ static int zpci_bus_setup_virtfn(struct zpci_bus *zbus, static inline int zpci_bus_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn) { - virtfn->is_virtfn = 1; - virtfn->multifunction = 0; return 0; } #endif +void pcibios_bus_add_device(struct pci_dev *pdev) +{ + struct zpci_dev *zdev = to_zpci(pdev); + + /* + * With pdev->no_vf_scan the common PCI probing code does not + * perform PF/VF linking. + */ + if (zdev->vfn) + zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn); + +} + static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev) { struct pci_bus *bus; @@ -214,20 +232,10 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev) } pdev = pci_scan_single_device(bus, zdev->devfn); - if (pdev) { - if (!zdev->is_physfn) { - rc = zpci_bus_setup_virtfn(zbus, pdev, zdev->vfn); - if (rc) - goto failed_with_pdev; - } + if (pdev) pci_bus_add_device(pdev); - } - return 0; -failed_with_pdev: - pci_stop_and_remove_bus_device(pdev); - pci_dev_put(pdev); - return rc; + return 0; } static void zpci_bus_add_devices(struct zpci_bus *zbus) diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h index 89be3c354b7bcadfdead28b6f0d817d0d9eb8d3c..4972433df4581ab5acbbf5b33ac55e6b7db99375 100644 --- a/arch/s390/pci/pci_bus.h +++ b/arch/s390/pci/pci_bus.h @@ -29,3 +29,16 @@ static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus, return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? 
NULL : zbus->function[devfn]; } + +#ifdef CONFIG_PCI_IOV +static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn) +{ + + pci_lock_rescan_remove(); + /* Linux' vfid's start at 0 vfn at 1 */ + pci_iov_remove_virtfn(pdev->physfn, vfn - 1); + pci_unlock_rescan_remove(); +} +#else /* CONFIG_PCI_IOV */ +static inline void zpci_remove_virtfn(struct pci_dev *pdev, int vfn) {} +#endif /* CONFIG_PCI_IOV */ diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index fdebd286f40236d80939cff20c5d55b417e3fc9b..9a3a291cad43244feca43b05c2ad7896a38dded7 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -92,6 +92,9 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1); break; } + /* the configuration request may be stale */ + if (zdev->state != ZPCI_FN_STATE_STANDBY) + break; zdev->fh = ccdf->fh; zdev->state = ZPCI_FN_STATE_CONFIGURED; ret = zpci_enable_device(zdev); @@ -118,7 +121,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) if (!zdev) break; if (pdev) - pci_stop_and_remove_bus_device_locked(pdev); + zpci_remove_device(zdev); ret = zpci_disable_device(zdev); if (ret) @@ -137,7 +140,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) /* Give the driver a hint that the function is * already unusable. */ pdev->error_state = pci_channel_io_perm_failure; - pci_stop_and_remove_bus_device_locked(pdev); + zpci_remove_device(zdev); } zdev->state = ZPCI_FN_STATE_STANDBY; diff --git a/arch/sh/drivers/platform_early.c b/arch/sh/drivers/platform_early.c index f3dc3f25b3ff08c2af8bd2a1db845fd2c6308ded..143747c45206fe6fd5e78cf57148127a9133c0ee 100644 --- a/arch/sh/drivers/platform_early.c +++ b/arch/sh/drivers/platform_early.c @@ -246,7 +246,7 @@ static int __init sh_early_platform_driver_probe_id(char *class_str, case EARLY_PLATFORM_ID_ERROR: pr_warn("%s: unable to parse %s parameter\n", class_str, epdrv->pdrv->driver.name); - /* fall-through */ + fallthrough; case EARLY_PLATFORM_ID_UNSET: match = NULL; break; diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c index 08e1af63edd9623c2fd941df937fa28a2aa1f7a2..34e25a439c811cbf3e113cc6c8fd82b57ff7afb4 100644 --- a/arch/sh/kernel/disassemble.c +++ b/arch/sh/kernel/disassemble.c @@ -486,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn) pr_cont("xd%d", rn & ~1); break; } - /* else, fall through */ + fallthrough; case D_REG_N: pr_cont("dr%d", rn); break; @@ -495,7 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn) pr_cont("xd%d", rm & ~1); break; } - /* else, fall through */ + fallthrough; case D_REG_M: pr_cont("dr%d", rm); break; diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c index 0d5f3c9d52f30edbcbf51141b8de16a294236903..e4147efa9ec6da2f2a73840361169c65324990d9 100644 --- a/arch/sh/kernel/kgdb.c +++ b/arch/sh/kernel/kgdb.c @@ -266,7 +266,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, ptr = &remcomInBuffer[1]; if (kgdb_hex2long(&ptr, &addr)) linux_regs->pc = addr; - /* fallthrough */ + fallthrough; case 'D': case 'k': atomic_set(&kgdb_cpu_doing_single_step, -1); diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index a0fbb8427b3911c66a9ef5ac21dd614d418ba3d5..4fe3f00137bc0d173aff33d121ce3602254408a2 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -418,7 +418,7 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs, case -ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto 
no_system_call_restart; - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: regs->regs[0] = save_r0; regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c index 4843f48bfe85f7f2543bde7d1670230ecefe2bc3..774a82b0c649f0bcf0d7bb15d66d7eaa805e41ed 100644 --- a/arch/sparc/kernel/auxio_64.c +++ b/arch/sparc/kernel/auxio_64.c @@ -87,7 +87,6 @@ void auxio_set_lte(int on) __auxio_sbus_set_lte(on); break; case AUXIO_TYPE_EBUS: - /* FALL-THROUGH */ default: break; } diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index bfae98ab863814dcd54418438f8715c184f89d72..23f8838dd96e3ad3dd6be1ff0f7c2394f1244e6b 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -55,7 +55,7 @@ static int clock_board_calc_nslots(struct clock_board *p) else return 5; } - /* Fallthrough */ + fallthrough; default: return 4; } diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c index 7580775a14b9e71567252dae142f5bb111efdcf8..58ad3f7de1fb531061e93bdb7ab346181b2fbffb 100644 --- a/arch/sparc/kernel/kgdb_32.c +++ b/arch/sparc/kernel/kgdb_32.c @@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, linux_regs->pc = addr; linux_regs->npc = addr + 4; } - /* fall through */ + fallthrough; case 'D': case 'k': diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index 5d6c2d287e85533fafba6b9c3fb8243a32e4de93..177746ae2c817fed08a3119f131d09751b099646 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c @@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, linux_regs->tpc = addr; linux_regs->tnpc = addr + 4; } - /* fall through */ + fallthrough; case 'D': case 'k': diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index c0886b400dad23a58057d0f0869bb64ba16616a7..2a12c86af956e0dfbde6ceeaa5939a4e70e8fccf 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -359,7 +359,7 @@ int __init pcr_arch_init(void) * counter overflow interrupt so we can't make use of * their hardware currently. 
*/ - /* fallthrough */ + fallthrough; default: err = -ENODEV; goto out_unregister; diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c index da8902295c8c37780de6998dd818195c496dddf3..3df960c137f7641b8456ef61912e40e5c3ca1e0e 100644 --- a/arch/sparc/kernel/prom_32.c +++ b/arch/sparc/kernel/prom_32.c @@ -224,7 +224,7 @@ void __init of_console_init(void) case PROMDEV_TTYB: skip = 1; - /* FALLTHRU */ + fallthrough; case PROMDEV_TTYA: type = "serial"; diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index e2c6f0abda009be9972b82d76e5f4c18b5dd8334..e9695a06492ff2f51ee8466ff209ea4ce8f09778 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -646,7 +646,7 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs case ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; - /* fallthrough */ + fallthrough; case ERESTARTNOINTR: regs->u_regs[UREG_I0] = orig_i0; regs->tpc -= 4; @@ -686,7 +686,7 @@ void do_signal32(struct pt_regs * regs) regs->tpc -= 4; regs->tnpc -= 4; pt_regs_clear_syscall(regs); - /* fall through */ + fallthrough; case ERESTART_RESTARTBLOCK: regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->tpc -= 4; diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index f1f8c8ebe641d395f87034e466114b2467989cf9..d0e0025ee3ba82eaef7e79bcb9db85230ce1dbd2 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -440,7 +440,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, case ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; - /* fallthrough */ + fallthrough; case ERESTARTNOINTR: regs->u_regs[UREG_I0] = orig_i0; regs->pc -= 4; @@ -506,7 +506,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) regs->pc -= 4; regs->npc -= 4; pt_regs_clear_syscall(regs); - /* fall through */ + fallthrough; case ERESTART_RESTARTBLOCK: regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->pc -= 4; diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 6937339a272c5111297dc92d9d81a489777df438..255264bcb46a7cf6906060250addd2aeeee06c23 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -461,7 +461,7 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, case ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; - /* fallthrough */ + fallthrough; case ERESTARTNOINTR: regs->u_regs[UREG_I0] = orig_i0; regs->tpc -= 4; @@ -532,7 +532,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) regs->tpc -= 4; regs->tnpc -= 4; pt_regs_clear_syscall(regs); - /* fall through */ + fallthrough; case ERESTART_RESTARTBLOCK: regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->tpc -= 4; diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c index 72e560ef4a0912f4b3d29939db0733e2e1a88742..d5beec856146088e06a77bf59df49145203df10f 100644 --- a/arch/sparc/math-emu/math_32.c +++ b/arch/sparc/math-emu/math_32.c @@ -359,7 +359,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs) *pfsr |= (6 << 14); return 0; /* simulate invalid_fp_register exception */ } - /* fall through */ + fallthrough; case 2: if (freg & 1) { /* doublewords must have bit 5 zeroed */ *pfsr |= (6 << 14); @@ -380,7 +380,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs) *pfsr |= (6 << 14); return 0; /* simulate invalid_fp_register 
exception */ } - /* fall through */ + fallthrough; case 2: if (freg & 1) { /* doublewords must have bit 5 zeroed */ *pfsr |= (6 << 14); @@ -408,13 +408,13 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs) *pfsr |= (6 << 14); return 0; /* simulate invalid_fp_register exception */ } - /* fall through */ + fallthrough; case 2: if (freg & 1) { /* doublewords must have bit 5 zeroed */ *pfsr |= (6 << 14); return 0; } - /* fall through */ + fallthrough; case 1: rd = (void *)&fregs[freg]; break; diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c index c8eabb973b8688648dff42f70bd6f05b43a35103..b1dbf2fa8c0ae82e4bdb5ed87b3ddc8964a96d6d 100644 --- a/arch/sparc/net/bpf_jit_comp_32.c +++ b/arch/sparc/net/bpf_jit_comp_32.c @@ -491,7 +491,7 @@ void bpf_jit_compile(struct bpf_prog *fp) } else { emit_loadimm(K, r_A); } - /* Fallthrough */ + fallthrough; case BPF_RET | BPF_A: if (seen_or_pass0) { if (i != flen - 1) { diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c index 3d57c71c532e47a41654d1a2974824f52f4d3ddf..88cd9b5c1b74442f72658b6740e24e9869e5c5f1 100644 --- a/arch/um/kernel/signal.c +++ b/arch/um/kernel/signal.c @@ -70,7 +70,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) PT_REGS_SYSCALL_RET(regs) = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: PT_REGS_RESTART_SYSCALL(regs); PT_REGS_ORIG_SYSCALL(regs) = PT_REGS_SYSCALL_NR(regs); diff --git a/arch/x86/boot/cmdline.c b/arch/x86/boot/cmdline.c index 4ff01176c1cc1dfcf812b9750e428d370ea78497..21d56ae83cdf1701cc5b6e8030cefb44daa79df5 100644 --- a/arch/x86/boot/cmdline.c +++ b/arch/x86/boot/cmdline.c @@ -54,7 +54,7 @@ int __cmdline_find_option(unsigned long cmdline_ptr, const char *option, char *b /* else */ state = st_wordcmp; opptr = option; - /* fall through */ + fallthrough; case st_wordcmp: if (c == '=' && !*opptr) { @@ -129,7 +129,7 @@ int __cmdline_find_option_bool(unsigned long cmdline_ptr, const char *option) state = st_wordcmp; opptr = option; wstart = pos; - /* fall through */ + fallthrough; case st_wordcmp: if (!*opptr) diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index 0048269180d58ab1ca597df5ada2a7fcd66c8c9e..dde7cb3724df38e7c51af9fdc73142167ad298e9 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -178,7 +178,7 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size, } *size = 0; } - /* Fall through */ + fallthrough; default: /* * If w/o offset, only size specified, memmap=nn[KMG] has the diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 39e592d0e0b418c9a34110b98a506677555e5f55..e478e40fbe5a9434fcb9ee095669969e9f41130e 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -30,12 +30,9 @@ #define STATIC static /* - * Use normal definitions of mem*() from string.c. There are already - * included header files which expect a definition of memset() and by - * the time we define memset macro, it is too late. + * Provide definitions of memzero and memmove as some of the decompressors will + * try to define their own functions if these are not defined as macros. 
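The guard pattern the new misc.c comment refers to: each imported decompressor compiles a private string helper only when no macro of the same name is already visible, so defining memzero()/memmove() as macros here suppresses those copies. A sketch of the fallback, assuming it mirrors the lib/decompress_*.c sources (simplified body):

#ifndef memmove
static void *memmove(void *dest, const void *src, size_t size)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	if (d <= s) {			/* copy forwards when dest precedes src */
		while (size--)
			*d++ = *s++;
	} else {			/* copy backwards for overlapping moves */
		d += size;
		s += size;
		while (size--)
			*--d = *--s;
	}
	return dest;
}
#endif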
*/ -#undef memcpy -#undef memset #define memzero(s, n) memset((s), 0, (n)) #define memmove memmove diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h index 995f7b7ad512ef3b3faba63dda8244e860a6cfe4..a232da487cd204ab6081142f90267a2cd0220b1d 100644 --- a/arch/x86/boot/string.h +++ b/arch/x86/boot/string.h @@ -11,10 +11,7 @@ void *memcpy(void *dst, const void *src, size_t len); void *memset(void *dst, int c, size_t len); int memcmp(const void *s1, const void *s2, size_t len); -/* - * Access builtin version by default. If one needs to use optimized version, - * do "undef memcpy" in .c file and link against right string.c - */ +/* Access builtin version by default. */ #define memcpy(d,s,l) __builtin_memcpy(d,s,l) #define memset(d,c,l) __builtin_memset(d,c,l) #define memcmp __builtin_memcmp diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 98e4d8886f11c5a08b34d52b5582fb131a6f8da8..ae9b0d4615b3254e83af6a5e6d9949831122f5fd 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -374,12 +374,14 @@ For 32-bit we have the following conventions - kernel is built with * Fetch the per-CPU GSBASE value for this processor and put it in @reg. * We normally use %gs for accessing per-CPU data, but we are setting up * %gs here and obviously can not use %gs itself to access per-CPU data. + * + * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and + * may not restore the host's value until the CPU returns to userspace. + * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives + * while running KVM's run loop. */ .macro GET_PERCPU_BASE reg:req - ALTERNATIVE \ - "LOAD_CPU_AND_NODE_SEG_LIMIT \reg", \ - "RDPID \reg", \ - X86_FEATURE_RDPID + LOAD_CPU_AND_NODE_SEG_LIMIT \reg andq $VDSO_CPUNODE_MASK, \reg movq __per_cpu_offset(, \reg, 8), \reg .endm diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 48512c7944e7e63ae70f2e452fc318e6b0e4fcf6..2f84c7ca74ea970c61b123a9d95f42695aa70a03 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -60,16 +60,10 @@ __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs) #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs) { - unsigned int nr = (unsigned int)regs->orig_ax; - if (IS_ENABLED(CONFIG_IA32_EMULATION)) current_thread_info()->status |= TS_COMPAT; - /* - * Subtlety here: if ptrace pokes something larger than 2^32-1 into - * orig_ax, the unsigned int return value truncates it. This may - * or may not be necessary, but it matches the old asm behavior. - */ - return (unsigned int)syscall_enter_from_user_mode(regs, nr); + + return (unsigned int)regs->orig_ax; } /* @@ -91,15 +85,29 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs) { unsigned int nr = syscall_32_enter(regs); + /* + * Subtlety here: if ptrace pokes something larger than 2^32-1 into + * orig_ax, the unsigned int return value truncates it. This may + * or may not be necessary, but it matches the old asm behavior. + */ + nr = (unsigned int)syscall_enter_from_user_mode(regs, nr); + do_syscall_32_irqs_on(regs, nr); syscall_exit_to_user_mode(regs); } static noinstr bool __do_fast_syscall_32(struct pt_regs *regs) { - unsigned int nr = syscall_32_enter(regs); + unsigned int nr = syscall_32_enter(regs); int res; + /* + * This cannot use syscall_enter_from_user_mode() as it has to + * fetch EBP before invoking any of the syscall entry work + * functions. 
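The two-step API this comment depends on splits the former single entry function: prepare() establishes the entry state (lockdep, RCU, irq tracing) without running any syscall work, and work() then runs the ptrace/seccomp/tracepoint work that may read pt_regs, which is why EBP has to be stashed in between. Sketched declarations, on the assumption that they match include/linux/entry-common.h as introduced by this series:

void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);		/* state setup only */
long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);	/* entry work, may rewrite nr */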
+ */ + syscall_enter_from_user_mode_prepare(regs); + instrumentation_begin(); /* Fetch EBP from where the vDSO stashed it. */ if (IS_ENABLED(CONFIG_X86_64)) { @@ -122,6 +130,9 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs) return false; } + /* The cast truncates any ptrace-induced syscall nr > 2^32 -1 */ + nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr); + /* Now this is just like a normal syscall. */ do_syscall_32_irqs_on(regs, nr); syscall_exit_to_user_mode(regs); diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S index 3a07ce3ec70bafd65e437a837796ba0b5d4c53ed..f1f96d4d8cd607ef2a11cf4850cbfe8f50cfcb9b 100644 --- a/arch/x86/entry/thunk_32.S +++ b/arch/x86/entry/thunk_32.S @@ -29,11 +29,6 @@ SYM_CODE_START_NOALIGN(\name) SYM_CODE_END(\name) .endm -#ifdef CONFIG_TRACE_IRQFLAGS - THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1 - THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1 -#endif - #ifdef CONFIG_PREEMPTION THUNK preempt_schedule_thunk, preempt_schedule THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 50963472ee850c35abed7d43b097f72a4c9277b7..31e6887d24f1a9dadfc1ad0f0fd9ea955daca925 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4682,7 +4682,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_CORE2_MEROM: x86_add_quirk(intel_clovertown_quirk); - /* fall through */ + fallthrough; case INTEL_FAM6_CORE2_MEROM_L: case INTEL_FAM6_CORE2_PENRYN: @@ -5062,7 +5062,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_SKYLAKE_X: pmem = true; - /* fall through */ + fallthrough; case INTEL_FAM6_SKYLAKE_L: case INTEL_FAM6_SKYLAKE: case INTEL_FAM6_KABYLAKE_L: @@ -5114,7 +5114,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_ICELAKE_X: case INTEL_FAM6_ICELAKE_D: pmem = true; - /* fall through */ + fallthrough; case INTEL_FAM6_ICELAKE_L: case INTEL_FAM6_ICELAKE: case INTEL_FAM6_TIGERLAKE_L: diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 63f58bdf556c433f46a215ba70cc12204250313c..8961653c5dd2ba91118c566a1c0738ab0ad9d4d7 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1268,7 +1268,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort) ret = X86_BR_ZERO_CALL; break; } - /* fall through */ + fallthrough; case 0x9a: /* call far absolute */ ret = X86_BR_CALL; break; diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index cb94ba86efd28f5ef1e69bcd4ac0976f29e17965..6a4ca27b2c9e126a861e472a14d1659e690b40aa 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -390,6 +390,18 @@ static struct uncore_event_desc snb_uncore_imc_events[] = { INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"), INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"), + INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"), + INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"), + + INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"), + INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"), + + INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"), + INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"), + INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"), + { /* end: all zeroes */ }, }; @@ -405,13 +417,35 @@ static struct uncore_event_desc snb_uncore_imc_events[] = {
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054 #define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE +/* BW break down- legacy counters */ +#define SNB_UNCORE_PCI_IMC_GT_REQUESTS 0x3 +#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE 0x5040 +#define SNB_UNCORE_PCI_IMC_IA_REQUESTS 0x4 +#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE 0x5044 +#define SNB_UNCORE_PCI_IMC_IO_REQUESTS 0x5 +#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE 0x5048 + enum perf_snb_uncore_imc_freerunning_types { - SNB_PCI_UNCORE_IMC_DATA = 0, + SNB_PCI_UNCORE_IMC_DATA_READS = 0, + SNB_PCI_UNCORE_IMC_DATA_WRITES, + SNB_PCI_UNCORE_IMC_GT_REQUESTS, + SNB_PCI_UNCORE_IMC_IA_REQUESTS, + SNB_PCI_UNCORE_IMC_IO_REQUESTS, + SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX, }; static struct freerunning_counters snb_uncore_imc_freerunning[] = { - [SNB_PCI_UNCORE_IMC_DATA] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 }, + [SNB_PCI_UNCORE_IMC_DATA_READS] = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, + 0x0, 0x0, 1, 32 }, + [SNB_PCI_UNCORE_IMC_DATA_WRITES] = { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE, + 0x0, 0x0, 1, 32 }, + [SNB_PCI_UNCORE_IMC_GT_REQUESTS] = { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE, + 0x0, 0x0, 1, 32 }, + [SNB_PCI_UNCORE_IMC_IA_REQUESTS] = { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE, + 0x0, 0x0, 1, 32 }, + [SNB_PCI_UNCORE_IMC_IO_REQUESTS] = { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE, + 0x0, 0x0, 1, 32 }, }; static struct attribute *snb_uncore_imc_formats_attr[] = { @@ -525,6 +559,18 @@ static int snb_uncore_imc_event_init(struct perf_event *event) base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE; idx = UNCORE_PMC_IDX_FREERUNNING; break; + case SNB_UNCORE_PCI_IMC_GT_REQUESTS: + base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE; + idx = UNCORE_PMC_IDX_FREERUNNING; + break; + case SNB_UNCORE_PCI_IMC_IA_REQUESTS: + base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE; + idx = UNCORE_PMC_IDX_FREERUNNING; + break; + case SNB_UNCORE_PCI_IMC_IO_REQUESTS: + base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE; + idx = UNCORE_PMC_IDX_FREERUNNING; + break; default: return -EINVAL; } @@ -598,7 +644,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = { static struct intel_uncore_type snb_uncore_imc = { .name = "imc", - .num_counters = 2, + .num_counters = 5, .num_boxes = 1, .num_freerunning_types = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX, .mmio_map_size = SNB_UNCORE_PCI_IMC_MAP_SIZE, diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index b9c2667ac46cc6bb73cb0b26519a9e4bfb14d90f..bc9758ef292ef1a10fc6542f391bbdb9b82db058 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -81,11 +81,8 @@ extern unsigned long efi_fw_vendor, efi_config_table; kernel_fpu_end(); \ }) - #define arch_efi_call_virt(p, f, args...) p->f(args) -#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size) - #else /* !CONFIG_X86_32 */ #define EFI_LOADER_SIGNATURE "EL64" @@ -125,9 +122,6 @@ struct efi_scratch { kernel_fpu_end(); \ }) -extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, - u32 type, u64 attribute); - #ifdef CONFIG_KASAN /* * CONFIG_KASAN may redefine memset to __memset.
__memset function is present @@ -143,17 +137,13 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, u32 type, u64 attribute); #endif /* CONFIG_X86_32 */ extern struct efi_scratch efi_scratch; -extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable); extern int __init efi_memblock_x86_reserve_range(void); extern void __init efi_print_memmap(void); -extern void __init efi_memory_uc(u64 addr, unsigned long size); extern void __init efi_map_region(efi_memory_desc_t *md); extern void __init efi_map_region_fixed(efi_memory_desc_t *md); extern void efi_sync_low_kernel_mappings(void); extern int __init efi_alloc_page_tables(void); extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages); -extern void __init old_map_region(efi_memory_desc_t *md); -extern void __init runtime_code_page_mkexec(void); extern void __init efi_runtime_update_mappings(void); extern void __init efi_dump_pagetable(void); extern void __init efi_apply_memmap_quirks(void); diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h index a8f9315b9eaeff990bf3781b3f085b16619acf8b..6fe54b2813c13960786595db35b7b1f2b0c0dba1 100644 --- a/arch/x86/include/asm/entry-common.h +++ b/arch/x86/include/asm/entry-common.h @@ -18,8 +18,16 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs) * state, not the interrupt state as imagined by Xen. */ unsigned long flags = native_save_fl(); - WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF | - X86_EFLAGS_NT)); + unsigned long mask = X86_EFLAGS_DF | X86_EFLAGS_NT; + + /* + * For !SMAP hardware we patch out CLAC on entry. + */ + if (boot_cpu_has(X86_FEATURE_SMAP) || + (IS_ENABLED(CONFIG_X86_64) && boot_cpu_has(X86_FEATURE_XENPV))) + mask |= X86_EFLAGS_AC; + + WARN_ON_ONCE(flags & mask); /* We think we came from user mode. Make sure pt_regs agrees.
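Background for the EFLAGS.AC masking above: on SMAP-capable hardware the kernel brackets every intended user-space access with STAC/CLAC, so AC must never arrive set on entry from user mode; on !SMAP parts CLAC is patched out and the bit carries no meaning. A minimal sketch of those helpers, assuming they mirror arch/x86/include/asm/smap.h with the alternatives-based patching omitted:

static __always_inline void clac(void)
{
	/* Clear EFLAGS.AC: forbid implicit user-space dereferences. */
	asm volatile("clac" ::: "memory");
}

static __always_inline void stac(void)
{
	/* Set EFLAGS.AC: temporarily permit user-space accesses. */
	asm volatile("stac" ::: "memory");
}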
*/ WARN_ON_ONCE(!user_mode(regs)); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 5ab3af7275d81d2fc1517eb54e315756f47dd1b8..5303dbc5c9bce438aa89527d733b2b2bee39d960 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1596,7 +1596,8 @@ asmlinkage void kvm_spurious_fault(void); _ASM_EXTABLE(666b, 667b) #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 0a301ad0b02f0018c054bb2676340860f7d5f9bc..9257667d13c5e37519ccfe6c5440129ed1d4f4c3 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -59,5 +59,6 @@ typedef struct { } void leave_mm(int cpu); +#define leave_mm leave_mm #endif /* _ASM_X86_MMU_H */ diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 40aa69d04862e1d00d82a2dd2fd5f3e7b7f495f3..d8324a23669610e379078543e3cbed3855aaae3e 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -327,8 +327,8 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, static const unsigned int argument_offs[] = { #ifdef __i386__ offsetof(struct pt_regs, ax), - offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), + offsetof(struct pt_regs, cx), #define NR_REG_ARGUMENTS 3 #else offsetof(struct pt_regs, di), diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index c3daf0aaa0ee6d387a5ff1c2ddafff2bea746fb1..cdaab30880b91c969344e42ba3392f98a055def3 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -239,7 +239,7 @@ void __init arch_init_ideal_nops(void) return; } - /* fall through */ + fallthrough; default: #ifdef CONFIG_X86_64 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 21325a4a78b92d6d03be3fc6e96a02ec91ad9b3a..779a89e31c4cb937b5efe089633bd5d0d7a270d8 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -800,7 +800,7 @@ static int irq_polarity(int idx) return IOAPIC_POL_HIGH; case MP_IRQPOL_RESERVED: pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n"); - /* fall through */ + fallthrough; case MP_IRQPOL_ACTIVE_LOW: default: /* Pointless default required due to do gcc stupidity */ return IOAPIC_POL_LOW; @@ -848,7 +848,7 @@ static int irq_trigger(int idx) return IOAPIC_EDGE; case MP_IRQTRIG_RESERVED: pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n"); - /* fall through */ + fallthrough; case MP_IRQTRIG_LEVEL: default: /* Pointless default required due to do gcc stupidity */ return IOAPIC_LEVEL; diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 7bda71def557bc559310bff246eddb09d1017ec1..99ee61c9ba548f3485edcad9b9c1874c10828af5 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -149,7 +149,7 @@ void __init default_setup_apic_routing(void) break; } /* P4 and above */ - /* fall through */ + fallthrough; case X86_VENDOR_HYGON: case X86_VENDOR_AMD: def_to_bigsmp = 1; diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 
dae32d948bf25ead0ff7193ea31ee32fb926fe5f..f8a56b5dc29fe572a2e2fcb50e5e0c57b661c043 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -161,6 +161,7 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec, apicd->move_in_progress = true; apicd->prev_vector = apicd->vector; apicd->prev_cpu = apicd->cpu; + WARN_ON_ONCE(apicd->cpu == newcpu); } else { irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector, managed); @@ -910,7 +911,7 @@ void send_cleanup_vector(struct irq_cfg *cfg) __send_cleanup_vector(apicd); } -static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) +void irq_complete_move(struct irq_cfg *cfg) { struct apic_chip_data *apicd; @@ -918,15 +919,16 @@ static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) if (likely(!apicd->move_in_progress)) return; - if (vector == apicd->vector && apicd->cpu == smp_processor_id()) + /* + * If the interrupt arrived on the new target CPU, cleanup the + * vector on the old target CPU. A vector check is not required + * because an interrupt can never move from one vector to another + * on the same CPU. + */ + if (apicd->cpu == smp_processor_id()) __send_cleanup_vector(apicd); } -void irq_complete_move(struct irq_cfg *cfg) -{ - __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); -} - /* * Called from fixup_irqs() with @desc->lock held and interrupts disabled. */ diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index c7503be92f35936d6d9b66ac310f22e9b12d7a41..57074cf3ad7c1a28dc1abf453920461c2eea9b66 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -248,7 +248,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, switch (leaf) { case 1: l1 = &l1i; - /* fall through */ + fallthrough; case 0: if (!l1->val) return; diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c index 7843ab3fde099d58c88a11a1c7fbcde9b9929795..3a44346f2276601a06d9aa5a5aae4b78c48b2418 100644 --- a/arch/x86/kernel/cpu/mce/inject.c +++ b/arch/x86/kernel/cpu/mce/inject.c @@ -199,7 +199,7 @@ static int raise_local(void) * calling irq_enter, but the necessary * machinery isn't exported currently. 
*/ - /*FALL THROUGH*/ + fallthrough; case MCJ_CTX_PROCESS: raise_exception(m, NULL); break; diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index d8f9230d2034132ea6b026153841722e67766a70..abe9fe0fb8517ce9b1af73740684708a83f07829 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -193,7 +193,7 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval) if (!atomic_sub_return(1, &cmci_storm_on_cpus)) pr_notice("CMCI storm subsided: switching to interrupt mode\n"); - /* FALLTHROUGH */ + fallthrough; case CMCI_STORM_SUBSIDED: /* diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c index 72182809b3339bc610c5c1fa23633ee236d66359..ca670919b56180e8c390bc471608fdcd5dc14721 100644 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c @@ -98,7 +98,7 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) case 7: if (size < 0x40) break; - /* Else, fall through */ + fallthrough; case 6: case 5: case 4: diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 8cdf29ffd95f170b5aa959817dd7af12c00216d9..b98ff620ba77250164c689fe2cc5c7727bbf25b9 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -349,7 +349,7 @@ static int arch_build_bp_info(struct perf_event *bp, hw->len = X86_BREAKPOINT_LEN_X; return 0; } - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 68acd30c6b878bacb65244c0edb1e3ddbdde52c0..c2f02f308ecfc2c045bde69bd8cfe7545d3294aa 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -450,7 +450,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, ptr = &remcomInBuffer[1]; if (kgdb_hex2long(&ptr, &addr)) linux_regs->ip = addr; - /* fall through */ + fallthrough; case 'D': case 'k': /* clear the trace bit */ @@ -539,7 +539,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) * a system call which should be ignored */ return NOTIFY_DONE; - /* fall through */ + fallthrough; default: if (user_mode(regs)) return NOTIFY_DONE; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 08320b0b2b276f0ceb82e28b07812d12dfdd0465..1b51b727b140572dbe517631725be45c47a1490a 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -270,9 +270,8 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt) { struct pt_regs *old_regs = set_irq_regs(regs); u32 token; - irqentry_state_t state; - state = irqentry_enter(regs); + ack_APIC_irq(); inc_irq_stat(irq_hv_callback_count); @@ -283,7 +282,6 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt) wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1); } - irqentry_exit(regs, state); set_irq_regs(old_regs); } @@ -654,7 +652,6 @@ static void __init kvm_guest_init(void) } if (pv_tlb_flush_supported()) { - pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others; pv_ops.mmu.tlb_remove_table = tlb_remove_table; pr_info("KVM setup pv remote TLB flush\n"); } @@ -767,6 +764,14 @@ static __init int activate_jump_labels(void) } arch_initcall(activate_jump_labels); +static void kvm_free_pv_cpu_mask(void) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) + free_cpumask_var(per_cpu(__pv_cpu_mask, cpu)); +} + static __init int kvm_alloc_cpumask(void) { int cpu; @@ -785,11 +790,20 @@ static __init int kvm_alloc_cpumask(void) if (alloc) for_each_possible_cpu(cpu) { - zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu), - GFP_KERNEL, cpu_to_node(cpu)); + 
if (!zalloc_cpumask_var_node( + per_cpu_ptr(&__pv_cpu_mask, cpu), + GFP_KERNEL, cpu_to_node(cpu))) { + goto zalloc_cpumask_fail; + } } + apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself; + pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others; return 0; + +zalloc_cpumask_fail: + kvm_free_pv_cpu_mask(); + return -ENOMEM; } arch_initcall(kvm_alloc_cpumask); diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 411af4aa7b51f3de393c9efa04be95697838734b..baa21090c9be31733be4b8cc175565dd82206ef3 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -312,7 +312,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) case 2: if (i == 0 || i == 13) continue; /* IRQ0 & IRQ13 not connected */ - /* fall through */ + fallthrough; default: if (i == 2) continue; /* IRQ2 is never connected */ @@ -356,7 +356,7 @@ static void __init construct_ioapic_table(int mpc_default_type) default: pr_err("???\nUnknown standard configuration %d\n", mpc_default_type); - /* fall through */ + fallthrough; case 1: case 5: memcpy(bus.bustype, "ISA ", 6); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 994d8393f2f7b078b7e5f4c0111d06e01ccae994..13ce616cc7afb7af965fde21772978bee8e07a58 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -684,9 +684,7 @@ void arch_cpu_idle(void) */ void __cpuidle default_idle(void) { - trace_cpu_idle_rcuidle(1, smp_processor_id()); safe_halt(); - trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } #if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE) EXPORT_SYMBOL(default_idle); @@ -792,7 +790,6 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c) static __cpuidle void mwait_idle(void) { if (!current_set_polling_and_test()) { - trace_cpu_idle_rcuidle(1, smp_processor_id()); if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) { mb(); /* quirk */ clflush((void *)¤t_thread_info()->flags); @@ -804,7 +801,6 @@ static __cpuidle void mwait_idle(void) __sti_mwait(0, 0); else local_irq_enable(); - trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); } else { local_irq_enable(); } diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 5679aa3fdcb87be4c504c3cbe9bc08c55509402b..e7537c5440bbac1cdf31ddd42bfb36055d16b33f 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -204,7 +204,7 @@ static int set_segment_reg(struct task_struct *task, case offsetof(struct user_regs_struct, ss): if (unlikely(value == 0)) return -EIO; - /* Else, fall through */ + fallthrough; default: *pt_regs_access(task_pt_regs(task), offset) = value; diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 0ec7ced727fe859f7c6cd616348bfca6e156b1cb..a515e2d230b7564ae94b3cea11f6cca63318a91e 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -654,7 +654,7 @@ static void native_machine_emergency_restart(void) case BOOT_CF9_FORCE: port_cf9_safe = true; - /* Fall through */ + fallthrough; case BOOT_CF9_SAFE: if (port_cf9_safe) { diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index d5fa494c2304de8f92b8dc04ed426faea9ab8702..be0d7d4152eca4302535369110e373c1486c78bf 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -726,7 +726,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) regs->ax = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: regs->ax = regs->orig_ax; regs->ip -= 2; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c 
index 27aa04a9570211c0d89e4d7a3112b65c286198dc..f5ef689dd62ad743e18a9159206eb8ce15a13803 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1594,14 +1594,28 @@ int native_cpu_disable(void) if (ret) return ret; - /* - * Disable the local APIC. Otherwise IPI broadcasts will reach - * it. It still responds normally to INIT, NMI, SMI, and SIPI - * messages. - */ - apic_soft_disable(); cpu_disable_common(); + /* + * Disable the local APIC. Otherwise IPI broadcasts will reach + * it. It still responds normally to INIT, NMI, SMI, and SIPI + * messages. + * + * Disabling the APIC must happen after cpu_disable_common() + * which invokes fixup_irqs(). + * + * Disabling the APIC preserves already set bits in IRR, but + * an interrupt arriving after disabling the local APIC does not + * set the corresponding IRR bit. + * + * fixup_irqs() scans IRR for set bits so it can raise a not + * yet handled interrupt on the new destination CPU via an IPI + * but obviously it can't do so for IRR bits which are not set. + * IOW, interrupts arriving after disabling the local APIC will + * be lost. + */ + apic_soft_disable(); + return 0; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 1f66d2d1e998db62758fb03e1b64d5c1ae05f081..81a2fb711091c9ee4829dfb753eacd8d40cc4bf1 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -729,20 +729,9 @@ static bool is_sysenter_singlestep(struct pt_regs *regs) #endif } -static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7) +static __always_inline unsigned long debug_read_clear_dr6(void) { - /* - * Disable breakpoints during exception handling; recursive exceptions - * are exceedingly 'fun'. - * - * Since this function is NOKPROBE, and that also applies to - * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a - * HW_BREAKPOINT_W on our stack) - * - * Entry text is excluded for HW_BP_X and cpu_entry_area, which - * includes the entry stack is excluded for everything. - */ - *dr7 = local_db_save(); + unsigned long dr6; /* * The Intel SDM says: @@ -755,15 +744,12 @@ static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7) * * Keep it simple: clear DR6 immediately. */ - get_debugreg(*dr6, 6); + get_debugreg(dr6, 6); set_debugreg(0, 6); /* Filter out all the reserved bits which are preset to 1 */ - *dr6 &= ~DR6_RESERVED; -} + dr6 &= ~DR6_RESERVED; -static __always_inline void debug_exit(unsigned long dr7) -{ - local_db_restore(dr7); + return dr6; } /* @@ -863,6 +849,18 @@ static void handle_debug(struct pt_regs *regs, unsigned long dr6, bool user) static __always_inline void exc_debug_kernel(struct pt_regs *regs, unsigned long dr6) { + /* + * Disable breakpoints during exception handling; recursive exceptions + * are exceedingly 'fun'. + * + * Since this function is NOKPROBE, and that also applies to + * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a + * HW_BREAKPOINT_W on our stack) + * + * Entry text is excluded for HW_BP_X and cpu_entry_area, which + * includes the entry stack is excluded for everything. 
+ */ + unsigned long dr7 = local_db_save(); bool irq_state = idtentry_enter_nmi(regs); instrumentation_begin(); @@ -883,6 +881,8 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs, instrumentation_end(); idtentry_exit_nmi(regs, irq_state); + + local_db_restore(dr7); } static __always_inline void exc_debug_user(struct pt_regs *regs, @@ -894,6 +894,15 @@ static __always_inline void exc_debug_user(struct pt_regs *regs, */ WARN_ON_ONCE(!user_mode(regs)); + /* + * NB: We can't easily clear DR7 here because + * idtentry_exit_to_usermode() can invoke ptrace, schedule, access + * user memory, etc. This means that a recursive #DB is possible. If + * this happens, that #DB will hit exc_debug_kernel() and clear DR7. + * Since we're not on the IST stack right now, everything will be + * fine. + */ + irqentry_enter_from_user_mode(regs); instrumentation_begin(); @@ -907,36 +916,24 @@ static __always_inline void exc_debug_user(struct pt_regs *regs, /* IST stack entry */ DEFINE_IDTENTRY_DEBUG(exc_debug) { - unsigned long dr6, dr7; - - debug_enter(&dr6, &dr7); - exc_debug_kernel(regs, dr6); - debug_exit(dr7); + exc_debug_kernel(regs, debug_read_clear_dr6()); } /* User entry, runs on regular task stack */ DEFINE_IDTENTRY_DEBUG_USER(exc_debug) { - unsigned long dr6, dr7; - - debug_enter(&dr6, &dr7); - exc_debug_user(regs, dr6); - debug_exit(dr7); + exc_debug_user(regs, debug_read_clear_dr6()); } #else /* 32 bit does not have separate entry points. */ DEFINE_IDTENTRY_RAW(exc_debug) { - unsigned long dr6, dr7; - - debug_enter(&dr6, &dr7); + unsigned long dr6 = debug_read_clear_dr6(); if (user_mode(regs)) exc_debug_user(regs, dr6); else exc_debug_kernel(regs, dr6); - - debug_exit(dr7); } #endif diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 15e5aad8ac2c1dd2f3c26869234a17d3f26c243d..3fdaa042823d026366eaddcf95dc0f7de0775e89 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -735,7 +735,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) * OPCODE1() of the "short" jmp which checks the same condition. 
*/ opc1 = OPCODE2(insn) - 0x10; - /* fall through */ + fallthrough; default: if (!is_cond_jmp_opcode(opc1)) return -ENOSYS; @@ -892,7 +892,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, fix_ip_or_call = 0; break; } - /* fall through */ + fallthrough; default: riprel_analyze(auprobe, &insn); } diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index d0e2825ae6174c1e172ec522b5fb900a06a3064e..2f6510de6b0c037ae969aa8f992b5814be694127 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2505,9 +2505,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4); val = GET_SMSTATE(u32, smstate, 0x7fcc); - ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); + + if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1)) + return X86EMUL_UNHANDLEABLE; + val = GET_SMSTATE(u32, smstate, 0x7fc8); - ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); + + if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1)) + return X86EMUL_UNHANDLEABLE; selector = GET_SMSTATE(u32, smstate, 0x7fc4); set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64)); @@ -2560,16 +2565,23 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED; val = GET_SMSTATE(u32, smstate, 0x7f68); - ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); + + if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1)) + return X86EMUL_UNHANDLEABLE; + val = GET_SMSTATE(u32, smstate, 0x7f60); - ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); + + if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1)) + return X86EMUL_UNHANDLEABLE; cr0 = GET_SMSTATE(u64, smstate, 0x7f58); cr3 = GET_SMSTATE(u64, smstate, 0x7f50); cr4 = GET_SMSTATE(u64, smstate, 0x7f48); ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00)); val = GET_SMSTATE(u64, smstate, 0x7ed0); - ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA); + + if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA)) + return X86EMUL_UNHANDLEABLE; selector = GET_SMSTATE(u32, smstate, 0x7e90); rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8); @@ -3016,7 +3028,7 @@ static void string_registers_quirk(struct x86_emulate_ctxt *ctxt) case 0xa4: /* movsb */ case 0xa5: /* movsd/w */ *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1; - /* fall through */ + fallthrough; case 0xaa: /* stosb */ case 0xab: /* stosd/w */ *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1; diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 814d3aee5ceff10b2ac1d16bfaeebd40c9c75983..1d330564eed8ca2bf5995f587dc2f9d4a2caf2d4 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1779,7 +1779,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) ret = kvm_hvcall_signal_event(vcpu, fast, ingpa); if (ret != HV_STATUS_INVALID_PORT_ID) break; - /* fall through - maybe userspace knows this conn_id. 
*/ + fallthrough; /* maybe userspace knows this conn_id */ case HVCALL_POST_MESSAGE: /* don't bother userspace if it has no way to handle it */ if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index c47d2acec52934eae639a8c4b347fbfa660caca6..4aa1c2e00e2abb76ac57e1b2fdc495539b5e5d69 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -285,7 +285,7 @@ int kvm_set_routing_entry(struct kvm *kvm, switch (ue->u.irqchip.irqchip) { case KVM_IRQCHIP_PIC_SLAVE: e->irqchip.pin += PIC_NUM_PINS / 2; - /* fall through */ + fallthrough; case KVM_IRQCHIP_PIC_MASTER: if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2) return -EINVAL; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5ccbee7165a215f31744073adcb74364939a956d..35cca2e0c802691c8e5c0028393bd22380a299a3 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1053,7 +1053,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, switch (delivery_mode) { case APIC_DM_LOWEST: vcpu->arch.apic_arb_prio++; - /* fall through */ + fallthrough; case APIC_DM_FIXED: if (unlikely(trig_mode && !level)) break; @@ -1341,7 +1341,7 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset) break; case APIC_TASKPRI: report_tpr_access(apic, false); - /* fall thru */ + fallthrough; default: val = kvm_lapic_get_reg(apic, offset); break; @@ -2027,7 +2027,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) case APIC_LVT0: apic_manage_nmi_watchdog(apic, val); - /* fall through */ + fallthrough; case APIC_LVTTHMR: case APIC_LVTPC: case APIC_LVT1: diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 4e03841f053dec97081d421a7720fab88253946d..71aa3da2a0b7b015ad091e7b229c4dc0a20182c4 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1916,7 +1916,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + unsigned flags) { return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); } @@ -2468,7 +2469,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, } if (sp->unsync_children) - kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); + kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); __clear_sp_write_flooding_count(sp); @@ -4421,7 +4422,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, rsvd_bits(maxphyaddr, 51); rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; - /* fall through */ + fallthrough; case PT64_ROOT_4LEVEL: rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | nonleaf_bit8_rsvd | rsvd_bits(7, 7) | diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index fb68467e60496aa5bc2abd2f8fa5dfd85e257ebd..e90bc436f5849b3bbe4727a5cd6e59d5b687c8d1 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -586,7 +586,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm) svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE; /* Give the current vmcb to the guest */ - svm_set_gif(svm, false); nested_vmcb->save.es = vmcb->save.es; nested_vmcb->save.cs = vmcb->save.cs; @@ -632,6 +631,9 @@ int nested_svm_vmexit(struct vcpu_svm *svm) /* Restore the original control entries */ copy_vmcb_control_area(&vmcb->control, &hsave->control); + /* On vmexit the GIF is set to false */ + svm_set_gif(svm, false); + 
svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset; @@ -1132,6 +1134,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, load_nested_vmcb_control(svm, &ctl); nested_prepare_vmcb_control(svm); + if (!nested_svm_vmrun_msrpm(svm)) + return -EINVAL; + out_set_gif: svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); return 0; diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 402dc4234e397861daef0be131a61118c8f2681a..7bf7bf734979488ac0f1cf10808f32ce3be882a9 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1106,6 +1106,7 @@ void sev_vm_destroy(struct kvm *kvm) list_for_each_safe(pos, q, head) { __unregister_enc_region_locked(kvm, list_entry(pos, struct enc_region, list)); + cond_resched(); } } diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 03dd7bac80348857b378be2edff0382302ed6e2a..c44f3e9140d5fe7432f0a48b938460c44c46305b 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2668,7 +2668,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_IA32_APICBASE: if (kvm_vcpu_apicv_active(vcpu)) avic_update_vapic_bar(to_svm(vcpu), data); - /* Fall through */ + fallthrough; default: return kvm_set_msr_common(vcpu, msr); } @@ -2938,8 +2938,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) if (npt_enabled) vcpu->arch.cr3 = svm->vmcb->save.cr3; - svm_complete_interrupts(svm); - if (is_guest_mode(vcpu)) { int vmexit; @@ -3504,7 +3502,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) stgi(); /* Any pending NMI will happen here */ - exit_fastpath = svm_exit_handlers_fastpath(vcpu); if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) kvm_after_interrupt(&svm->vcpu); @@ -3518,6 +3515,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) } svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; + vmcb_mark_all_clean(svm->vmcb); /* if exit due to PF check for async PF */ if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) @@ -3537,7 +3535,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) SVM_EXIT_EXCP_BASE + MC_VECTOR)) svm_handle_mce(svm); - vmcb_mark_all_clean(svm->vmcb); + svm_complete_interrupts(svm); + exit_fastpath = svm_exit_handlers_fastpath(vcpu); return exit_fastpath; } @@ -3900,21 +3899,28 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate) static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) { struct vcpu_svm *svm = to_svm(vcpu); - struct vmcb *nested_vmcb; struct kvm_host_map map; - u64 guest; - u64 vmcb; int ret = 0; - guest = GET_SMSTATE(u64, smstate, 0x7ed8); - vmcb = GET_SMSTATE(u64, smstate, 0x7ee0); + if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) { + u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); + u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8); + u64 vmcb = GET_SMSTATE(u64, smstate, 0x7ee0); - if (guest) { - if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL) - return 1; - nested_vmcb = map.hva; - ret = enter_svm_guest_mode(svm, vmcb, nested_vmcb); - kvm_vcpu_unmap(&svm->vcpu, &map, true); + if (guest) { + if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) + return 1; + + if (!(saved_efer & EFER_SVME)) + return 1; + + if (kvm_vcpu_map(&svm->vcpu, + gpa_to_gfn(vmcb), &map) == -EINVAL) + return 1; + + ret = enter_svm_guest_mode(svm, vmcb, map.hva); + kvm_vcpu_unmap(&svm->vcpu, &map, true); + } } return ret; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 
23b58c28a1c926f461cb87c9ee1f03a310f23e25..1bb6b31eb64666d57eb5095566f5579b1d26694c 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -4404,6 +4404,14 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) kvm_vcpu_flush_tlb_current(vcpu); + /* + * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between + * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are + * up-to-date before switching to L1. + */ + if (enable_ept && is_pae_paging(vcpu)) + vmx_ept_load_pdptrs(vcpu); + leave_guest_mode(vcpu); if (nested_cpu_has_preemption_timer(vmcs12)) @@ -4668,7 +4676,7 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu) vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; vmx->nested.msrs.exit_ctls_high &= - ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; + ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; } } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 46ba2e03a8926d7a3d47463b5661ecd857aa76fb..8646a797b7a838d3f484e868bf8ac762e3a8858f 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2971,7 +2971,7 @@ static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu) vpid_sync_context(to_vmx(vcpu)->vpid); } -static void ept_load_pdptrs(struct kvm_vcpu *vcpu) +void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; @@ -3114,7 +3114,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd, guest_cr3 = vcpu->arch.cr3; else /* vmcs01.GUEST_CR3 is already up-to-date. */ update_guest_cr3 = false; - ept_load_pdptrs(vcpu); + vmx_ept_load_pdptrs(vcpu); } else { guest_cr3 = pgd; } @@ -4654,7 +4654,7 @@ static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) vmcs_read32(VM_EXIT_INSTRUCTION_LEN); if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) return false; - /* fall through */ + fallthrough; case DB_VECTOR: return !(vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)); @@ -4827,7 +4827,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) } kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); - /* fall through */ + fallthrough; case BP_VECTOR: /* * Update instruction length as we may reinject #BP from @@ -5257,7 +5257,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu) error_code = vmcs_read32(IDT_VECTORING_ERROR_CODE); } - /* fall through */ + fallthrough; case INTR_TYPE_SOFT_EXCEPTION: kvm_clear_exception_queue(vcpu); break; @@ -5610,7 +5610,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu) * keeping track of global entries in shadow page tables. 
*/ - /* fall-through */ + fallthrough; case INVPCID_TYPE_ALL_INCL_GLOBAL: kvm_mmu_unload(vcpu); return kvm_skip_emulated_instruction(vcpu); @@ -6054,6 +6054,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) (exit_reason != EXIT_REASON_EXCEPTION_NMI && exit_reason != EXIT_REASON_EPT_VIOLATION && exit_reason != EXIT_REASON_PML_FULL && + exit_reason != EXIT_REASON_APIC_ACCESS && exit_reason != EXIT_REASON_TASK_SWITCH)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; @@ -6578,7 +6579,7 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, break; case INTR_TYPE_SOFT_EXCEPTION: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); - /* fall through */ + fallthrough; case INTR_TYPE_HARD_EXCEPTION: if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { u32 err = vmcs_read32(error_code_field); @@ -6588,7 +6589,7 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, break; case INTR_TYPE_SOFT_INTR: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); - /* fall through */ + fallthrough; case INTR_TYPE_EXT_INTR: kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); break; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 26175a4759fa5f8eef34b443dcb3fd6415b4f6b6..a2f82127c1707ae71c0c2836bef6ed3898361f27 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -356,6 +356,7 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp); int vmx_find_msr_index(struct vmx_msrs *m, u32 msr); int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r, struct x86_exception *e); +void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu); #define POSTED_INTR_ON 0 #define POSTED_INTR_SN 1 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 599d73206299c011b8e3b7271d9b8f2c048a1fcb..1994602a0851f2011bf237c329b18c0374e52bb1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -975,7 +975,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | - X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE; + X86_CR4_SMEP; if (kvm_valid_cr4(vcpu, cr4)) return 1; @@ -1116,14 +1116,12 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) vcpu->arch.eff_db[dr] = val; break; case 4: - /* fall through */ case 6: if (!kvm_dr6_valid(val)) return -1; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); break; case 5: - /* fall through */ default: /* 7 */ if (!kvm_dr7_valid(val)) return -1; /* #GP */ @@ -1154,12 +1152,10 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) *val = vcpu->arch.db[array_index_nospec(dr, size)]; break; case 4: - /* fall through */ case 6: *val = vcpu->arch.dr6; break; case 5: - /* fall through */ default: /* 7 */ *val = vcpu->arch.dr7; break; @@ -2735,7 +2731,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) return 1; if (!lapic_in_kernel(vcpu)) - return 1; + return data ? 1 : 0; vcpu->arch.apf.msr_en_val = data; @@ -3051,7 +3047,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1: - pr = true; /* fall through */ + pr = true; + fallthrough; case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: case MSR_P6_EVNTSEL0 ... 
MSR_P6_EVNTSEL1: if (kvm_pmu_is_valid_msr(vcpu, msr)) @@ -3581,6 +3578,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_SMALLER_MAXPHYADDR: r = (int) allow_smaller_maxphyaddr; break; + case KVM_CAP_STEAL_TIME: + r = sched_info_on(); + break; default: break; } @@ -4359,7 +4359,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, case KVM_CAP_HYPERV_SYNIC2: if (cap->args[0]) return -EINVAL; - /* fall through */ + fallthrough; case KVM_CAP_HYPERV_SYNIC: if (!irqchip_in_kernel(vcpu->kvm)) @@ -8672,7 +8672,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) vcpu->arch.pv.pv_unhalted = false; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; - /* fall through */ + fallthrough; case KVM_MP_STATE_RUNNABLE: vcpu->arch.apf.halted = false; break; @@ -10751,9 +10751,11 @@ EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value); void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code) { struct x86_exception fault; + u32 access = error_code & + (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK); if (!(error_code & PFERR_PRESENT_MASK) || - vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, error_code, &fault) != UNMAPPED_GVA) { + vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) { /* * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page * tables probably do not match the TLB. Just proceed diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index d46fff11f06fa36b402b87e378bbbea68345f5b2..aa067859a70b64c4ebeff1e279a9d3a4ee10cefc 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -24,7 +24,7 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_cmdline.o = -pg endif -CFLAGS_cmdline.o := -fno-stack-protector +CFLAGS_cmdline.o := -fno-stack-protector -fno-jump-tables endif inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c index 4f1719e22d3cb47f6c4eab50d28fcdab866687ed..b6da0933930859d577db66b6ea3e0e061c0554d9 100644 --- a/arch/x86/lib/cmdline.c +++ b/arch/x86/lib/cmdline.c @@ -58,7 +58,7 @@ __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size, state = st_wordcmp; opptr = option; wstart = pos; - /* fall through */ + fallthrough; case st_wordcmp: if (!*opptr) { @@ -89,7 +89,7 @@ __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size, break; } state = st_wordskip; - /* fall through */ + fallthrough; case st_wordskip: if (!c) @@ -151,7 +151,7 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size, state = st_wordcmp; opptr = option; - /* fall through */ + fallthrough; case st_wordcmp: if ((c == '=') && !*opptr) { @@ -172,7 +172,7 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size, break; } state = st_wordskip; - /* fall through */ + fallthrough; case st_wordskip: if (myisspace(c)) diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index 31600d851fd8e3c327f524a87443debdee52be71..5e69603ff63ffa3c7e0de515d7de8a1f7a179bfd 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -179,7 +179,7 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) if (insn->addr_bytes == 2) return -EINVAL; - /* fall through */ + fallthrough; case -EDOM: case offsetof(struct pt_regs, bx): @@ -362,7 +362,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx) case INAT_SEG_REG_GS: return vm86regs->gs; case INAT_SEG_REG_IGNORE: - /* fall through */ default: return -EINVAL; } @@ -386,7 +385,6 @@ static short 
get_segment_selector(struct pt_regs *regs, int seg_reg_idx) */ return get_user_gs(regs); case INAT_SEG_REG_IGNORE: - /* fall through */ default: return -EINVAL; } @@ -786,7 +784,7 @@ int insn_get_code_seg_params(struct pt_regs *regs) */ return INSN_CODE_SEG_PARAMS(4, 8); case 3: /* Invalid setting. CS.L=1, CS.D=1 */ - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c index 73dc66d887f354a78d9e48efcee214be22cb866a..ec071cbb080432301b92e16ab4ff7249b0c960c1 100644 --- a/arch/x86/math-emu/errors.c +++ b/arch/x86/math-emu/errors.c @@ -186,7 +186,7 @@ void FPU_printall(void) case TAG_Special: /* Update tagi for the printk below */ tagi = FPU_Special(r); - /* fall through */ + fallthrough; case TAG_Valid: printk("st(%d) %c .%04lx %04lx %04lx %04lx e%+-6d ", i, getsign(r) ? '-' : '+', diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c index 127ea54122d75f48b97633cfa96b589ada8f5f19..4a9887851ad8ab05e11fb427df846721bd66ff43 100644 --- a/arch/x86/math-emu/fpu_trig.c +++ b/arch/x86/math-emu/fpu_trig.c @@ -1352,7 +1352,7 @@ static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag) case TW_Denormal: if (denormal_operand() < 0) return; - /* fall through */ + fallthrough; case TAG_Zero: case TAG_Valid: setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr)); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 35f1498e98324f50a8d6ca4f670a5e76470409a5..6e3e8a1249031193697833c3740f9749a9613d3f 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -190,6 +190,53 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) return pmd_k; } +/* + * Handle a fault on the vmalloc or module mapping area + * + * This is needed because there is a race condition between the time + * when the vmalloc mapping code updates the PMD to the point in time + * where it synchronizes this update with the other page-tables in the + * system. + * + * In this race window another thread/CPU can map an area on the same + * PMD, finds it already present and does not synchronize it with the + * rest of the system yet. As a result v[mz]alloc might return areas + * which are not mapped in every page-table in the system, causing an + * unhandled page-fault when they are accessed. + */ +static noinline int vmalloc_fault(unsigned long address) +{ + unsigned long pgd_paddr; + pmd_t *pmd_k; + pte_t *pte_k; + + /* Make sure we are in vmalloc area: */ + if (!(address >= VMALLOC_START && address < VMALLOC_END)) + return -1; + + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Do _not_ use "current" here. We might be inside + * an interrupt in the middle of a task switch.. + */ + pgd_paddr = read_cr3_pa(); + pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); + if (!pmd_k) + return -1; + + if (pmd_large(*pmd_k)) + return 0; + + pte_k = pte_offset_kernel(pmd_k, address); + if (!pte_present(*pte_k)) + return -1; + + return 0; +} +NOKPROBE_SYMBOL(vmalloc_fault); + void arch_sync_kernel_mappings(unsigned long start, unsigned long end) { unsigned long addr; @@ -1110,6 +1157,37 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code, */ WARN_ON_ONCE(hw_error_code & X86_PF_PK); +#ifdef CONFIG_X86_32 + /* + * We can fault-in kernel-space virtual memory on-demand. The + * 'reference' page table is init_mm.pgd. + * + * NOTE! We MUST NOT take any locks for this case. 
We may + * be in an interrupt or a critical region, and should + * only copy the information from the master page table, + * nothing more. + * + * Before doing this on-demand faulting, ensure that the + * fault is not any of the following: + * 1. A fault on a PTE with a reserved bit set. + * 2. A fault caused by a user-mode access. (Do not demand- + * fault kernel memory due to user-mode accesses). + * 3. A fault caused by a page-level protection violation. + * (A demand fault would be on a non-present page which + * would have X86_PF_PROT==0). + * + * This is only needed to close a race condition on x86-32 in + * the vmalloc mapping/unmapping code. See the comment above + * vmalloc_fault() for details. On x86-64 the race does not + * exist as the vmalloc mappings don't need to be synchronized + * there. + */ + if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) { + if (vmalloc_fault(address) >= 0) + return; + } +#endif + /* Was the fault spurious, caused by lazy TLB invalidation? */ if (spurious_kernel_fault(hw_error_code, address)) return; diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 84d85dbd1dad6f430c9bf174146dd377cffe59c6..9e5ccc56f8e0775e5a3fda8661d44cd5d8f7a760 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -574,7 +574,7 @@ static bool memremap_should_map_decrypted(resource_size_t phys_addr, /* For SEV, these areas are encrypted */ if (sev_active()) break; - /* Fallthrough */ + fallthrough; case E820_TYPE_PRAM: return true; diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index c5174b4e318b4811ca22aefcd5bcb447bd4bc640..683cd12f4793890d1a71a4939065c6a467ff1d5b 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -321,7 +321,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, u64 addr, u64 max_addr, u64 size) { return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size, - 0, NULL, NUMA_NO_NODE); + 0, NULL, 0); } static int __init setup_emu2phys_nid(int *dfl_phys_nid) diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 1a3569b43aa5bd3b8abf96556bc068f44922dabf..0951b47e64c10b9923a02267041f270d525d50ed 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -555,21 +555,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); load_new_mm_cr3(next->pgd, new_asid, true); - /* - * NB: This gets called via leave_mm() in the idle path - * where RCU functions differently. Tracing normally - * uses RCU, so we need to use the _rcuidle variant. - * - * (There is no good reason for this. The idle code should - * be rearranged to call this before rcu_idle_enter().) - */ - trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); + trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } else { /* The new ASID is already up to date. */ load_new_mm_cr3(next->pgd, new_asid, false); - /* See above wrt _rcuidle. */ - trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); + trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0); } /* Make sure we write CR3 before loaded_mm. 
*/ diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 9f9aad42ccff527f3406d9bedae0b4734cefc7a5..89395a5049bbbbad59d871a6ed18b7705a1a1cb9 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -26,6 +26,7 @@ #include #include #include +#include #include static int xen_pcifront_enable_irq(struct pci_dev *dev) diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index f6ea8f1a9d57ae6a20fd50a239b720f61b6f9d6e..d37ebe6e70d7a167d2ab388c149ec2482ff46f33 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -49,7 +49,6 @@ #include #include #include -#include #include #include #include @@ -496,74 +495,6 @@ void __init efi_init(void) efi_print_memmap(); } -#if defined(CONFIG_X86_32) - -void __init efi_set_executable(efi_memory_desc_t *md, bool executable) -{ - u64 addr, npages; - - addr = md->virt_addr; - npages = md->num_pages; - - memrange_efi_to_native(&addr, &npages); - - if (executable) - set_memory_x(addr, npages); - else - set_memory_nx(addr, npages); -} - -void __init runtime_code_page_mkexec(void) -{ - efi_memory_desc_t *md; - - /* Make EFI runtime service code area executable */ - for_each_efi_memory_desc(md) { - if (md->type != EFI_RUNTIME_SERVICES_CODE) - continue; - - efi_set_executable(md, true); - } -} - -void __init efi_memory_uc(u64 addr, unsigned long size) -{ - unsigned long page_shift = 1UL << EFI_PAGE_SHIFT; - u64 npages; - - npages = round_up(size, page_shift) / page_shift; - memrange_efi_to_native(&addr, &npages); - set_memory_uc(addr, npages); -} - -void __init old_map_region(efi_memory_desc_t *md) -{ - u64 start_pfn, end_pfn, end; - unsigned long size; - void *va; - - start_pfn = PFN_DOWN(md->phys_addr); - size = md->num_pages << PAGE_SHIFT; - end = md->phys_addr + size; - end_pfn = PFN_UP(end); - - if (pfn_range_is_mapped(start_pfn, end_pfn)) { - va = __va(md->phys_addr); - - if (!(md->attribute & EFI_MEMORY_WB)) - efi_memory_uc((u64)(unsigned long)va, size); - } else - va = efi_ioremap(md->phys_addr, size, - md->type, md->attribute); - - md->virt_addr = (u64) (unsigned long) va; - if (!va) - pr_err("ioremap of 0x%llX failed!\n", - (unsigned long long)md->phys_addr); -} - -#endif - /* Merge contiguous regions of the same type and attribute */ static void __init efi_merge_regions(void) { diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 826ead67753da52877ca94cd2d6c351d09a03742..e06a199423c0fedd79cb9a4cc4173655ecf3956c 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -29,9 +29,35 @@ #include #include #include +#include #include #include +void __init efi_map_region(efi_memory_desc_t *md) +{ + u64 start_pfn, end_pfn, end; + unsigned long size; + void *va; + + start_pfn = PFN_DOWN(md->phys_addr); + size = md->num_pages << PAGE_SHIFT; + end = md->phys_addr + size; + end_pfn = PFN_UP(end); + + if (pfn_range_is_mapped(start_pfn, end_pfn)) { + va = __va(md->phys_addr); + + if (!(md->attribute & EFI_MEMORY_WB)) + set_memory_uc((unsigned long)va, md->num_pages); + } else { + va = ioremap_cache(md->phys_addr, size); + } + + md->virt_addr = (unsigned long)va; + if (!va) + pr_err("ioremap of 0x%llX failed!\n", md->phys_addr); +} + /* * To make EFI call EFI runtime service in physical addressing mode we need * prolog/epilog before/after the invocation to claim the EFI runtime service @@ -58,11 +84,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) return 0; } -void __init efi_map_region(efi_memory_desc_t *md) -{ - old_map_region(md); -} 
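The open-coded efi_map_region() introduced in the efi_32.c hunk above boils down to one decision: if the physical range is already covered by the kernel direct mapping, reuse it via __va(), otherwise create a fresh cacheable mapping. A reduced sketch of that decision follows (illustrative only, with a hypothetical helper name; the set_memory_uc() fixup for non-write-back regions and the error reporting are elided):

	/* Sketch, not patch code: the core mapping choice in efi_map_region(). */
	static void *choose_efi_mapping(u64 phys, u64 num_pages)
	{
		u64 size = num_pages << PAGE_SHIFT;
		unsigned long start_pfn = PFN_DOWN(phys);
		unsigned long end_pfn = PFN_UP(phys + size);

		/* Already covered by the direct map? Reuse it. */
		if (pfn_range_is_mapped(start_pfn, end_pfn))
			return __va(phys);

		/* Otherwise build a new cacheable mapping. */
		return (void __force *)ioremap_cache(phys, size);
	}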
- void __init efi_map_region_fixed(efi_memory_desc_t *md) {} void __init parse_efi_setup(u64 phys_addr, u32 data_len) {} @@ -107,6 +128,15 @@ efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size, void __init efi_runtime_update_mappings(void) { - if (__supported_pte_mask & _PAGE_NX) - runtime_code_page_mkexec(); + if (__supported_pte_mask & _PAGE_NX) { + efi_memory_desc_t *md; + + /* Make EFI runtime service code area executable */ + for_each_efi_memory_desc(md) { + if (md->type != EFI_RUNTIME_SERVICES_CODE) + continue; + + set_memory_x(md->virt_addr, md->num_pages); + } + } } diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 413583f904a695365e460502ff8f9b76ca844aa9..6af4da1149bacfe58b8f45b9c9d8ef60da261abc 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -259,6 +259,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT; rodata = __pa(__start_rodata); pfn = rodata >> PAGE_SHIFT; + + pf = _PAGE_NX | _PAGE_ENC; if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) { pr_err("Failed to map kernel rodata 1:1\n"); return 1; diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index 76cee341507b7330b3955bf71a2aa4be887d351b..b3b17d6c50f079997a84f673ffd53a6c641c5bc7 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -448,7 +448,7 @@ static void do_signal(struct pt_regs *regs) regs->areg[2] = -EINTR; break; } - /* fallthrough */ + fallthrough; case -ERESTARTNOINTR: regs->areg[2] = regs->syscall; regs->pc -= 3; diff --git a/block/badblocks.c b/block/badblocks.c index 2e5f5697db358a4efd1199c1e35a1806543a15f2..d39056630d9c1de07d3923daeb8b80e6ab6a086e 100644 --- a/block/badblocks.c +++ b/block/badblocks.c @@ -525,7 +525,7 @@ ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len, case 3: if (newline != '\n') return -EINVAL; - /* fall through */ + fallthrough; case 2: if (length <= 0) return -EINVAL; diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 68882b9b8f11fae420572eee256b3fd894258e7c..b791e2041e49b3bbdf1c3e41fc8ea1fb589e647a 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg) kfree(bfqg); } -void bfqg_and_blkg_get(struct bfq_group *bfqg) +static void bfqg_and_blkg_get(struct bfq_group *bfqg) { /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */ bfqg_get(bfqg); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index a4c0bec920cbde6bef9f61909efc353937de85d4..fa98470df3f0a35c61b453cf094f62f95d87a9d9 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -4980,7 +4980,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) pr_err("bdi %s: bfq: bad prio class %d\n", bdi_dev_name(bfqq->bfqd->queue->backing_dev_info), ioprio_class); - /* fall through */ + fallthrough; case IOPRIO_CLASS_NONE: /* * No prio set, inherit CPU scheduling settings. 
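A large share of the hunks in this patch, including the bfq ones right here, mechanically replace /* fall through */ comments with the fallthrough pseudo-keyword so the compiler can verify the intent under -Wimplicit-fallthrough. As background only (not part of the patch, and simplified relative to the real definition in include/linux/compiler_attributes.h), the keyword amounts to:

	/* Sketch of how such a pseudo-keyword is typically defined. */
	#if defined(__has_attribute)
	# if __has_attribute(__fallthrough__)
	#  define fallthrough __attribute__((__fallthrough__))
	# endif
	#endif
	#ifndef fallthrough
	# define fallthrough do {} while (0)	/* fallthrough */
	#endif

	static int classify(int tag)
	{
		switch (tag) {
		case 0:
			tag++;
			fallthrough;	/* intentional: silences -Wimplicit-fallthrough */
		case 1:
			return tag;
		default:
			return -1;
		}
	}

Unlike the comment form, the attribute form is actually checked by the compiler (it must directly precede a case or default label), whereas a comment can silently rot as the code around it changes.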
@@ -5112,7 +5112,7 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, return &bfqg->async_bfqq[0][ioprio]; case IOPRIO_CLASS_NONE: ioprio = IOPRIO_NORM; - /* fall through */ + fallthrough; case IOPRIO_CLASS_BE: return &bfqg->async_bfqq[1][ioprio]; case IOPRIO_CLASS_IDLE: @@ -5895,18 +5895,6 @@ static void bfq_finish_requeue_request(struct request *rq) struct bfq_queue *bfqq = RQ_BFQQ(rq); struct bfq_data *bfqd; - /* - * Requeue and finish hooks are invoked in blk-mq without - * checking whether the involved request is actually still - * referenced in the scheduler. To handle this fact, the - * following two checks make this function exit in case of - * spurious invocations, for which there is nothing to do. - * - * First, check whether rq has nothing to do with an elevator. - */ - if (unlikely(!(rq->rq_flags & RQF_ELVPRIV))) - return; - /* * rq either is not associated with any icq, or is an already * requeued request that has not (yet) been re-inserted into diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index cd224aaf9f52a541e8851b5f7cb92c7008b3bcf1..703895224562c69097d0968643c851bfdbf953eb 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -986,7 +986,6 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); struct bfq_group *bfqq_group(struct bfq_queue *bfqq); struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node); -void bfqg_and_blkg_get(struct bfq_group *bfqg); void bfqg_and_blkg_put(struct bfq_group *bfqg); #ifdef CONFIG_BFQ_GROUP_IOSCHED diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index eb0e2a6daabe63f30be59394f73e201e05f59488..26776bdbdf360c97635c6fb197b0b3c497689f2e 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -533,9 +533,7 @@ static void bfq_get_entity(struct bfq_entity *entity) bfqq->ref++; bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", bfqq, bfqq->ref); - } else - bfqg_and_blkg_get(container_of(entity, struct bfq_group, - entity)); + } } /** @@ -649,14 +647,8 @@ static void bfq_forget_entity(struct bfq_service_tree *st, entity->on_st_or_in_serv = false; st->wsum -= entity->weight; - if (is_in_service) - return; - - if (bfqq) + if (bfqq && !is_in_service) bfq_put_queue(bfqq); - else - bfqg_and_blkg_put(container_of(entity, struct bfq_group, - entity)); } /** diff --git a/block/bio.c b/block/bio.c index c63ba04bd629675cf272523bb6ee1fc8052005bf..e865ea55b9f9a25e1d48bc6cdcae67e26b9afd48 100644 --- a/block/bio.c +++ b/block/bio.c @@ -740,8 +740,8 @@ static inline bool page_is_mergeable(const struct bio_vec *bv, struct page *page, unsigned int len, unsigned int off, bool *same_page) { - phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + - bv->bv_offset + bv->bv_len - 1; + size_t bv_end = bv->bv_offset + bv->bv_len; + phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1; phys_addr_t page_addr = page_to_phys(page); if (vec_end_addr + 1 != page_addr + off) @@ -750,9 +750,9 @@ static inline bool page_is_mergeable(const struct bio_vec *bv, return false; *same_page = ((vec_end_addr & PAGE_MASK) == page_addr); - if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page) - return false; - return true; + if (*same_page) + return true; + return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE); } /* @@ -879,8 +879,10 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page, struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; if (page_is_mergeable(bv, page, len, off, same_page)) { - if 
(bio->bi_iter.bi_size > UINT_MAX - len) + if (bio->bi_iter.bi_size > UINT_MAX - len) { + *same_page = false; return false; + } bv->bv_len += len; bio->bi_iter.bi_size += len; return true; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 619a79b51068cd8aa8fa2a5ba4142c9123de4c18..c195365c98172e133972ccb899433ba5a4ba81c8 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1152,13 +1152,15 @@ int blkcg_init_queue(struct request_queue *q) if (preloaded) radix_tree_preload_end(); - ret = blk_iolatency_init(q); + ret = blk_throtl_init(q); if (ret) goto err_destroy_all; - ret = blk_throtl_init(q); - if (ret) + ret = blk_iolatency_init(q); + if (ret) { + blk_throtl_exit(q); goto err_destroy_all; + } return 0; err_destroy_all: diff --git a/block/blk-core.c b/block/blk-core.c index d9d632639bd185ac2e5d93b71f247cef56e1b997..10c08ac506978852a1d1b0d66d3663f7adde1226 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -539,6 +539,7 @@ struct request_queue *blk_alloc_queue(int node_id) goto fail_stats; q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES; + q->backing_dev_info->io_pages = VM_READAHEAD_PAGES; q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; q->node = node_id; diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 413e0b5c8e6b08cb9e76af181b267bbed84e8ead..d37b55db24099152594cdde3084fc78519c29060 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -2092,14 +2092,15 @@ static void ioc_pd_free(struct blkg_policy_data *pd) { struct ioc_gq *iocg = pd_to_iocg(pd); struct ioc *ioc = iocg->ioc; + unsigned long flags; if (ioc) { - spin_lock(&ioc->lock); + spin_lock_irqsave(&ioc->lock, flags); if (!list_empty(&iocg->active_list)) { propagate_active_weight(iocg, 0, 0); list_del_init(&iocg->active_list); } - spin_unlock(&ioc->lock); + spin_unlock_irqrestore(&ioc->lock, flags); hrtimer_cancel(&iocg->waitq_timer); hrtimer_cancel(&iocg->delay_timer); diff --git a/block/blk-merge.c b/block/blk-merge.c index 6529e3aab001184a92dc83fc29228149e5a3c00f..f685d633bcc9b0b409233c1ead8306b06209c456 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -154,7 +154,7 @@ static inline unsigned get_max_io_size(struct request_queue *q, if (max_sectors > start_offset) return max_sectors - start_offset; - return sectors & (lbs - 1); + return sectors & ~(lbs - 1); } static inline unsigned get_max_segment_size(const struct request_queue *q, @@ -533,10 +533,17 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq, } EXPORT_SYMBOL(__blk_rq_map_sg); +static inline unsigned int blk_rq_get_max_segments(struct request *rq) +{ + if (req_op(rq) == REQ_OP_DISCARD) + return queue_max_discard_segments(rq->q); + return queue_max_segments(rq->q); +} + static inline int ll_new_hw_segment(struct request *req, struct bio *bio, unsigned int nr_phys_segs) { - if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q)) + if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req)) goto no_merge; if (blk_integrity_merge_bio(req->q, req, bio) == false) @@ -624,7 +631,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, return 0; total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; - if (total_phys_segments > queue_max_segments(q)) + if (total_phys_segments > blk_rq_get_max_segments(req)) return 0; if (blk_integrity_merge_rq(q, req, next) == false) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index a19cdf159b7536a6a941effa75da65869da7b076..d2790e5b06d1487fde47730e3bf7a6a7b712da1a 100644 --- 
a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -78,6 +78,15 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) return; clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); + /* + * Order clearing SCHED_RESTART against the list_empty_careful(&hctx->dispatch) + * check in blk_mq_run_hw_queue(). Its pair is the barrier in + * blk_mq_dispatch_rq_list(): without it, the dispatch code could see + * SCHED_RESTART already cleared while a request newly added to + * hctx->dispatch is missed by the check in blk_mq_run_hw_queue(). + */ + smp_mb(); + blk_mq_run_hw_queue(hctx, true); } diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 126021fc3a11f9308a5fe0745ba2a6f9f1ee2ee5..e81ca1bf6e10b6dfcb3838f2424ee71ca89aea87 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -66,7 +66,7 @@ static inline void blk_mq_sched_requeue_request(struct request *rq) struct request_queue *q = rq->q; struct elevator_queue *e = q->elevator; - if (e && e->type->ops.requeue_request) + if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request) e->type->ops.requeue_request(rq); } diff --git a/block/blk-mq.c b/block/blk-mq.c index 0015a1892153ad59e0d04c61dc041efe4bc946ab..b3d2785eefe930692416d8540a7897e8819f9d3b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1437,6 +1437,15 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, list_splice_tail_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); + /* + * Order adding requests to hctx->dispatch against checking the + * SCHED_RESTART flag. The pair of this smp_mb() is the one in + * blk_mq_sched_restart(); it prevents the restart path from missing + * requests newly added to hctx->dispatch while this path still + * observes SCHED_RESTART as set. + */ + smp_mb(); + /* * If SCHED_RESTART was set by the caller of this function and * it is no longer set that means that it was cleared by another @@ -1834,6 +1843,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, /** * blk_mq_request_bypass_insert - Insert a request at the dispatch list. * @rq: Pointer to request to be inserted. + * @at_head: true if the request should be inserted at the head of the list. * @run_queue: If we should run the hardware queue after inserting the request. 
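The two smp_mb() calls added above form a matched pair: each side writes one location, runs a full barrier, then reads the location the other side writes, so at least one side is guaranteed to observe the other's update. Reduced to the bare idiom (hypothetical names, kernel list/bitops headers assumed; this is not the actual blk-mq code):

	static unsigned long restart_flag;	/* stands in for SCHED_RESTART  */
	static LIST_HEAD(dispatch_list);	/* stands in for hctx->dispatch */

	/* Dispatch side, cf. blk_mq_dispatch_rq_list() above. */
	static bool publish_and_check(struct list_head *rq)
	{
		list_add_tail(rq, &dispatch_list);	/* (1) write: new request    */
		smp_mb();				/* pairs with barrier below  */
		return test_bit(0, &restart_flag);	/* (2) read: flag still set? */
	}

	/* Restart side, cf. blk_mq_sched_restart() above. */
	static bool clear_and_check(void)
	{
		clear_bit(0, &restart_flag);		/* (A) write: clear flag     */
		smp_mb();				/* pairs with barrier above  */
		return !list_empty_careful(&dispatch_list); /* (B) read: work left? */
	}

If (2) finds the flag already cleared, the pairing guarantees (B) saw the new request, and vice versa, so one of the two paths always reruns the hardware queue and no request is stranded.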
* * Should only be used carefully, when the caller knows we want to @@ -2016,7 +2026,8 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, if (bypass_insert) return BLK_STS_RESOURCE; - blk_mq_request_bypass_insert(rq, false, run_queue); + blk_mq_sched_insert_request(rq, false, run_queue, false); + return BLK_STS_OK; } diff --git a/block/blk-stat.c b/block/blk-stat.c index 7da302ff88d0d512151b9d82e08744581a864dc6..ae3dd1fb8e61d7b88c581d22dc6682bdb0ca994f 100644 --- a/block/blk-stat.c +++ b/block/blk-stat.c @@ -137,6 +137,7 @@ void blk_stat_add_callback(struct request_queue *q, struct blk_stat_callback *cb) { unsigned int bucket; + unsigned long flags; int cpu; for_each_possible_cpu(cpu) { @@ -147,20 +148,22 @@ void blk_stat_add_callback(struct request_queue *q, blk_rq_stat_init(&cpu_stat[bucket]); } - spin_lock(&q->stats->lock); + spin_lock_irqsave(&q->stats->lock, flags); list_add_tail_rcu(&cb->list, &q->stats->callbacks); blk_queue_flag_set(QUEUE_FLAG_STATS, q); - spin_unlock(&q->stats->lock); + spin_unlock_irqrestore(&q->stats->lock, flags); } void blk_stat_remove_callback(struct request_queue *q, struct blk_stat_callback *cb) { - spin_lock(&q->stats->lock); + unsigned long flags; + + spin_lock_irqsave(&q->stats->lock, flags); list_del_rcu(&cb->list); if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting) blk_queue_flag_clear(QUEUE_FLAG_STATS, q); - spin_unlock(&q->stats->lock); + spin_unlock_irqrestore(&q->stats->lock, flags); del_timer_sync(&cb->timer); } @@ -183,10 +186,12 @@ void blk_stat_free_callback(struct blk_stat_callback *cb) void blk_stat_enable_accounting(struct request_queue *q) { - spin_lock(&q->stats->lock); + unsigned long flags; + + spin_lock_irqsave(&q->stats->lock, flags); q->stats->enable_accounting = true; blk_queue_flag_set(QUEUE_FLAG_STATS, q); - spin_unlock(&q->stats->lock); + spin_unlock_irqrestore(&q->stats->lock, flags); } EXPORT_SYMBOL_GPL(blk_stat_enable_accounting); diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 0fa615eefd52639cc25490d9534310f27b1f5da7..fd410086fe1de59b02e926dd364b9253a05a6311 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -528,7 +528,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) == (REQ_SYNC | REQ_IDLE)) return false; - /* fallthrough */ + fallthrough; case REQ_OP_DISCARD: return true; default: diff --git a/block/bsg-lib.c b/block/bsg-lib.c index fb7b347f80105b142c3577f0c0bde4fc72583c6a..d185396d88bbc5ed8d240cad9a5deb89a24dbb75 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -378,7 +378,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name, bset->timeout_fn = timeout; set = &bset->tag_set; - set->ops = &bsg_mq_ops, + set->ops = &bsg_mq_ops; set->nr_hw_queues = 1; set->queue_depth = 128; set->numa_node = NUMA_NO_NODE; diff --git a/block/ioprio.c b/block/ioprio.c index 77bcab11dce5794531abf58f1d6e7957ea06c887..04ebd37966f181eee55282e67bc96c696de2fdc2 100644 --- a/block/ioprio.c +++ b/block/ioprio.c @@ -71,7 +71,7 @@ int ioprio_check_cap(int ioprio) case IOPRIO_CLASS_RT: if (!capable(CAP_SYS_ADMIN)) return -EPERM; - /* fall through */ + fallthrough; /* rt has prio field too */ case IOPRIO_CLASS_BE: if (data >= IOPRIO_BE_NR || data < 0) diff --git a/block/partitions/core.c b/block/partitions/core.c index e62a98a8eeb75039ea1e8341356b7d4aa4112aa0..722406b841dfc7b44a76be504afc2495e175ef3d 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -278,6 +278,15 @@ static void 
hd_struct_free_work(struct work_struct *work) { struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct, rcu_work); + struct gendisk *disk = part_to_disk(part); + + /* + * Release the disk reference acquired in delete_partition here. + * We can't release it in hd_struct_free because the final put_device + * needs process context and thus can't be run directly from a + * percpu_ref ->release handler. + */ + put_device(disk_to_dev(disk)); part->start_sect = 0; part->nr_sects = 0; @@ -293,7 +302,6 @@ static void hd_struct_free(struct percpu_ref *ref) rcu_dereference_protected(disk->part_tbl, 1); rcu_assign_pointer(ptbl->last_lookup, NULL); - put_device(disk_to_dev(disk)); INIT_RCU_WORK(&part->rcu_work, hd_struct_free_work); queue_rcu_work(system_wq, &part->rcu_work); @@ -524,19 +532,20 @@ int bdev_add_partition(struct block_device *bdev, int partno, int bdev_del_partition(struct block_device *bdev, int partno) { struct block_device *bdevp; - struct hd_struct *part; - int ret = 0; + struct hd_struct *part = NULL; + int ret; - part = disk_get_part(bdev->bd_disk, partno); - if (!part) - return -ENXIO; - - ret = -ENOMEM; - bdevp = bdget(part_devt(part)); + bdevp = bdget_disk(bdev->bd_disk, partno); if (!bdevp) - goto out_put_part; + return -ENXIO; mutex_lock(&bdevp->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, 1); + + ret = -ENXIO; + part = disk_get_part(bdev->bd_disk, partno); + if (!part) + goto out_unlock; ret = -EBUSY; if (bdevp->bd_openers) @@ -545,16 +554,14 @@ int bdev_del_partition(struct block_device *bdev, int partno) sync_blockdev(bdevp); invalidate_bdev(bdevp); - mutex_lock_nested(&bdev->bd_mutex, 1); delete_partition(bdev->bd_disk, part); - mutex_unlock(&bdev->bd_mutex); - ret = 0; out_unlock: + mutex_unlock(&bdev->bd_mutex); mutex_unlock(&bdevp->bd_mutex); bdput(bdevp); -out_put_part: - disk_put_part(part); + if (part) + disk_put_part(part); return ret; } diff --git a/crypto/af_alg.c b/crypto/af_alg.c index a6f581ab200c12c2d464dfe077ebfc2a0b18e0e2..8be8bec07cdd2c7a14f9a65bde64b4ed0b992e94 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -845,9 +846,15 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, } lock_sock(sk); - if (ctx->init && (init || !ctx->more)) { - err = -EINVAL; - goto unlock; + if (ctx->init && !ctx->more) { + if (ctx->used) { + err = -EINVAL; + goto unlock; + } + + pr_info_once( + "%s sent an empty control message without MSG_MORE.\n", + current->comm); } ctx->init = true; diff --git a/crypto/drbg.c b/crypto/drbg.c index e99fe34cfa0054046d2a4226b5c8dff50169b266..3132967a1749721e2ef8658a2e04fbfeebe315c0 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1521,7 +1521,7 @@ static int drbg_prepare_hrng(struct drbg_state *drbg) case -EALREADY: err = 0; - /* fall through */ + fallthrough; default: drbg->random_ready.func = NULL; diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index ba0b7702f2e91be21359c12ef583c20b890349c5..12e82a61b8961c230f6d72edcaa030ed0878ffaf 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -2348,121 +2348,121 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_hash_speed(alg, sec, generic_hash_speed_template); break; } - /* fall through */ + fallthrough; case 301: test_hash_speed("md4", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 302: test_hash_speed("md5", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) 
break; - /* fall through */ + fallthrough; case 303: test_hash_speed("sha1", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 304: test_hash_speed("sha256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 305: test_hash_speed("sha384", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 306: test_hash_speed("sha512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 307: test_hash_speed("wp256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 308: test_hash_speed("wp384", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 309: test_hash_speed("wp512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 310: test_hash_speed("tgr128", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 311: test_hash_speed("tgr160", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 312: test_hash_speed("tgr192", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 313: test_hash_speed("sha224", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 314: test_hash_speed("rmd128", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 315: test_hash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 316: test_hash_speed("rmd256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 317: test_hash_speed("rmd320", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 318: test_hash_speed("ghash-generic", sec, hash_speed_template_16); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 319: test_hash_speed("crc32c", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 320: test_hash_speed("crct10dif", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 321: test_hash_speed("poly1305", sec, poly1305_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 322: test_hash_speed("sha3-224", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 323: test_hash_speed("sha3-256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 324: test_hash_speed("sha3-384", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 325: test_hash_speed("sha3-512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 326: test_hash_speed("sm3", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 327: test_hash_speed("streebog256", sec, 
generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 328: test_hash_speed("streebog512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; - /* fall through */ + fallthrough; case 399: break; @@ -2471,121 +2471,121 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_ahash_speed(alg, sec, generic_hash_speed_template); break; } - /* fall through */ + fallthrough; case 401: test_ahash_speed("md4", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 402: test_ahash_speed("md5", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 403: test_ahash_speed("sha1", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 404: test_ahash_speed("sha256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 405: test_ahash_speed("sha384", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 406: test_ahash_speed("sha512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 407: test_ahash_speed("wp256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 408: test_ahash_speed("wp384", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 409: test_ahash_speed("wp512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 410: test_ahash_speed("tgr128", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 411: test_ahash_speed("tgr160", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 412: test_ahash_speed("tgr192", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 413: test_ahash_speed("sha224", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 414: test_ahash_speed("rmd128", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 415: test_ahash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 416: test_ahash_speed("rmd256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 417: test_ahash_speed("rmd320", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 418: test_ahash_speed("sha3-224", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 419: test_ahash_speed("sha3-256", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 420: test_ahash_speed("sha3-384", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 421: test_ahash_speed("sha3-512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 422: test_mb_ahash_speed("sha1", 
sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 423: test_mb_ahash_speed("sha256", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 424: test_mb_ahash_speed("sha512", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 425: test_mb_ahash_speed("sm3", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 426: test_mb_ahash_speed("streebog256", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 427: test_mb_ahash_speed("streebog512", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; - /* fall through */ + fallthrough; case 499: break; diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c index c2b452af68068465abeb06abdc4ebbbd8acd21d3..9861302cc7dbb92200792f90f3ee69f135e32289 100644 --- a/drivers/accessibility/braille/braille_console.c +++ b/drivers/accessibility/braille/braille_console.c @@ -290,7 +290,7 @@ static int vt_notifier_call(struct notifier_block *blk, break; case '\t': c = ' '; - /* Fallthrough */ + fallthrough; default: if (c < 32) /* Ignore other control sequences */ diff --git a/drivers/accessibility/speakup/Kconfig b/drivers/accessibility/speakup/Kconfig index 0803c2013cf4d476cdb9136d165f95332c595d2e..07ecbbde0384123d22509a37c01a117273507546 100644 --- a/drivers/accessibility/speakup/Kconfig +++ b/drivers/accessibility/speakup/Kconfig @@ -42,6 +42,11 @@ config SPEAKUP one of the listed synthesizers, you should say n. if SPEAKUP + +config SPEAKUP_SERIALIO + def_bool y + depends on ISA || COMPILE_TEST + config SPEAKUP_SYNTH_ACNTSA tristate "Accent SA synthesizer support" help @@ -52,7 +57,7 @@ config SPEAKUP_SYNTH_ACNTSA config SPEAKUP_SYNTH_ACNTPC tristate "Accent PC synthesizer support" - depends on ISA || COMPILE_TEST + depends on SPEAKUP_SERIALIO help This is the Speakup driver for the accent pc synthesizer. 
You can say y to build it into the kernel, @@ -104,7 +109,7 @@ config SPEAKUP_SYNTH_DECEXT config SPEAKUP_SYNTH_DECPC depends on m - depends on ISA || COMPILE_TEST + depends on SPEAKUP_SERIALIO tristate "DECtalk PC (big ISA card) synthesizer support" help @@ -127,7 +132,7 @@ config SPEAKUP_SYNTH_DECPC config SPEAKUP_SYNTH_DTLK tristate "DoubleTalk PC synthesizer support" - depends on ISA || COMPILE_TEST + depends on SPEAKUP_SERIALIO help This is the Speakup driver for the internal DoubleTalk @@ -138,7 +143,7 @@ config SPEAKUP_SYNTH_DTLK config SPEAKUP_SYNTH_KEYPC tristate "Keynote Gold PC synthesizer support" - depends on ISA || COMPILE_TEST + depends on SPEAKUP_SERIALIO help This is the Speakup driver for the Keynote Gold diff --git a/drivers/accessibility/speakup/Makefile b/drivers/accessibility/speakup/Makefile index 5befb4933b850e0cd2fdc9c5bb082a2ba9fa6579..6e4bfac8af650de79d4ce2020fc507db699ecb0a 100644 --- a/drivers/accessibility/speakup/Makefile +++ b/drivers/accessibility/speakup/Makefile @@ -25,8 +25,8 @@ speakup-y := \ keyhelp.o \ kobjects.o \ selection.o \ - serialio.o \ spk_ttyio.o \ synth.o \ thread.o \ varhandlers.o +speakup-$(CONFIG_SPEAKUP_SERIALIO) += serialio.o diff --git a/drivers/accessibility/speakup/serialio.c b/drivers/accessibility/speakup/serialio.c index 177a2988641c16ea4922ea3de6e0b0a7ee06df2e..403b01d66367e69651afeffa60dfc155a25f1aac 100644 --- a/drivers/accessibility/speakup/serialio.c +++ b/drivers/accessibility/speakup/serialio.c @@ -32,6 +32,7 @@ static void spk_serial_tiocmset(unsigned int set, unsigned int clear); static unsigned char spk_serial_in(void); static unsigned char spk_serial_in_nowait(void); static void spk_serial_flush_buffer(void); +static int spk_serial_wait_for_xmitr(struct spk_synth *in_synth); struct spk_io_ops spk_serial_io_ops = { .synth_out = spk_serial_out, @@ -40,6 +41,7 @@ struct spk_io_ops spk_serial_io_ops = { .synth_in = spk_serial_in, .synth_in_nowait = spk_serial_in_nowait, .flush_buffer = spk_serial_flush_buffer, + .wait_for_xmitr = spk_serial_wait_for_xmitr, }; EXPORT_SYMBOL_GPL(spk_serial_io_ops); @@ -211,7 +213,7 @@ void spk_stop_serial_interrupt(void) } EXPORT_SYMBOL_GPL(spk_stop_serial_interrupt); -int spk_wait_for_xmitr(struct spk_synth *in_synth) +static int spk_serial_wait_for_xmitr(struct spk_synth *in_synth) { int tmout = SPK_XMITR_TIMEOUT; @@ -280,7 +282,7 @@ static void spk_serial_flush_buffer(void) static int spk_serial_out(struct spk_synth *in_synth, const char ch) { - if (in_synth->alive && spk_wait_for_xmitr(in_synth)) { + if (in_synth->alive && spk_serial_wait_for_xmitr(in_synth)) { outb_p(ch, speakup_info.port_tts); return 1; } @@ -295,7 +297,7 @@ const char *spk_serial_synth_immediate(struct spk_synth *synth, while ((ch = *buff)) { if (ch == '\n') ch = synth->procspeech; - if (spk_wait_for_xmitr(synth)) + if (spk_serial_wait_for_xmitr(synth)) outb(ch, speakup_info.port_tts); else return buff; diff --git a/drivers/accessibility/speakup/spk_priv.h b/drivers/accessibility/speakup/spk_priv.h index c75b40838794710f8d44bd19651124e3ecf85aeb..0f4bcbe5ddb93974fcfbe7979d8cff60f8b0c403 100644 --- a/drivers/accessibility/speakup/spk_priv.h +++ b/drivers/accessibility/speakup/spk_priv.h @@ -34,7 +34,6 @@ const struct old_serial_port *spk_serial_init(int index); void spk_stop_serial_interrupt(void); -int spk_wait_for_xmitr(struct spk_synth *in_synth); void spk_serial_release(void); void spk_ttyio_release(void); void spk_ttyio_register_ldisc(void); diff --git a/drivers/accessibility/speakup/spk_ttyio.c 
b/drivers/accessibility/speakup/spk_ttyio.c index 9b95f77f926573ad0056b6b1a249e9e71f845e87..a831ff64f8ba579e9acd967f7906e43a633dac6d 100644 --- a/drivers/accessibility/speakup/spk_ttyio.c +++ b/drivers/accessibility/speakup/spk_ttyio.c @@ -116,6 +116,7 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear); static unsigned char spk_ttyio_in(void); static unsigned char spk_ttyio_in_nowait(void); static void spk_ttyio_flush_buffer(void); +static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth); struct spk_io_ops spk_ttyio_ops = { .synth_out = spk_ttyio_out, @@ -125,6 +126,7 @@ struct spk_io_ops spk_ttyio_ops = { .synth_in = spk_ttyio_in, .synth_in_nowait = spk_ttyio_in_nowait, .flush_buffer = spk_ttyio_flush_buffer, + .wait_for_xmitr = spk_ttyio_wait_for_xmitr, }; EXPORT_SYMBOL_GPL(spk_ttyio_ops); @@ -286,6 +288,11 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear) mutex_unlock(&speakup_tty_mutex); } +static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth) +{ + return 1; +} + static unsigned char ttyio_in(int timeout) { struct spk_ldisc_data *ldisc_data = speakup_tty->disc_data; diff --git a/drivers/accessibility/speakup/spk_types.h b/drivers/accessibility/speakup/spk_types.h index d3272c6d199aaabe08d0caef0dc53a1dbd378357..7398f1196e103732845570dd83cdf3ca1d6698ac 100644 --- a/drivers/accessibility/speakup/spk_types.h +++ b/drivers/accessibility/speakup/spk_types.h @@ -158,6 +158,7 @@ struct spk_io_ops { unsigned char (*synth_in)(void); unsigned char (*synth_in_nowait)(void); void (*flush_buffer)(void); + int (*wait_for_xmitr)(struct spk_synth *synth); }; struct spk_synth { diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c index 3568bfb89912c3316d649b6c19223f4206936457..ac47dbac72075d03f7a67c51c8c8ce66c9351f6f 100644 --- a/drivers/accessibility/speakup/synth.c +++ b/drivers/accessibility/speakup/synth.c @@ -159,7 +159,7 @@ int spk_synth_is_alive_restart(struct spk_synth *synth) { if (synth->alive) return 1; - if (spk_wait_for_xmitr(synth) > 0) { + if (synth->io_ops->wait_for_xmitr(synth) > 0) { /* restart */ synth->alive = 1; synth_printf("%s", synth->init); diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 4c348377a39d87419c4bdbfcdff9b8e2ed2713e4..806b8ce05624b1772f1585790c6f0edd28c5a32e 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -99,8 +99,8 @@ static int fch_misc_setup(struct apd_private_data *pdata) if (ret < 0) return -ENOENT; - acpi_dev_get_property(adev, "is-rv", ACPI_TYPE_INTEGER, &obj); - clk_data->is_rv = obj->integer.value; + if (!acpi_dev_get_property(adev, "is-rv", ACPI_TYPE_INTEGER, &obj)) + clk_data->is_rv = obj->integer.value; list_for_each_entry(rentry, &resource_list, node) { clk_data->base = devm_ioremap(&adev->dev, rentry->res->start, diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 6ad8cb05f672fe630237f9cd218573eb11748fb9..4a0b07792233ef5208b67fa9eb7b2df6e4b2b2f2 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -350,7 +350,7 @@ void __iomem __ref pg_off = round_down(phys, PAGE_SIZE); pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; - virt = acpi_map(pg_off, pg_sz); + virt = acpi_map(phys, size); if (!virt) { mutex_unlock(&acpi_ioremap_lock); kfree(map); @@ -358,7 +358,7 @@ void __iomem __ref } INIT_LIST_HEAD(&map->list); - map->virt = virt; + map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK); map->phys = pg_off; map->size = pg_sz; map->track.refcount = 1; @@ -1575,11 +1575,26 @@ static 
acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level, acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, u32 level) { + acpi_status status; + if (!(res->flags & IORESOURCE_MEM)) return AE_TYPE; - return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level, - acpi_deactivate_mem_region, NULL, res, NULL); + status = acpi_walk_namespace(ACPI_TYPE_REGION, handle, level, + acpi_deactivate_mem_region, NULL, + res, NULL); + if (ACPI_FAILURE(status)) + return status; + + /* + * Wait for all of the mappings queued up for removal by + * acpi_deactivate_mem_region() to actually go away. + */ + synchronize_rcu(); + rcu_barrier(); + flush_scheduled_work(); + + return AE_OK; } EXPORT_SYMBOL_GPL(acpi_release_memory); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 0c0a736eb8613d16e9ef597c7ce179d2595bd169..fbd8eaa32d32505cb26991a16edbbf9b3388b363 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -807,8 +807,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, (sstatus & 0xf) != 1) break; - ata_link_printk(link, KERN_INFO, "avn bounce port%d\n", - port); + ata_link_info(link, "avn bounce port%d\n", port); pci_read_config_word(pdev, 0x92, &val); val &= ~(1 << port); diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index 6853dbb4131d8765ff8f1b013f0ba29e6e67bc22..49f7acbfcf01e91d7bd42277292441fc090320f1 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -470,7 +470,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) switch (priv->version) { case BRCM_SATA_BCM7425: hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE; - /* fall through */ + fallthrough; case BRCM_SATA_NSP: hpriv->flags |= AHCI_HFLAG_NO_NCQ; priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE; diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 129556fcf6be760ce48834873ea3ea057c4bd8a5..86261deeb4c58d48ff866b578f62e6c82f9cd1e1 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -326,7 +326,7 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port, node); break; } - /* fall through */ + fallthrough; case -ENODEV: /* continue normally */ hpriv->phys[port] = NULL; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b1cd4d97bc2a75994361aa88e7c86a305aa8f5d7..f546a5761c4f2c419ddb1d24fd0e8972ea633324 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -190,7 +190,7 @@ struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, case ATA_LITER_PMP_FIRST: if (sata_pmp_attached(ap)) return ap->pmp_link; - /* fall through */ + fallthrough; case ATA_LITER_HOST_FIRST: return &ap->link; } @@ -201,11 +201,11 @@ struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, case ATA_LITER_HOST_FIRST: if (sata_pmp_attached(ap)) return ap->pmp_link; - /* fall through */ + fallthrough; case ATA_LITER_PMP_FIRST: if (unlikely(ap->slave_link)) return ap->slave_link; - /* fall through */ + fallthrough; case ATA_LITER_EDGE: return NULL; } @@ -523,7 +523,7 @@ int atapi_cmd_type(u8 opcode) case ATA_12: if (atapi_passthru16) return ATAPI_PASS_THRU; - /* fall thru */ + fallthrough; default: return ATAPI_MISC; } @@ -1800,7 +1800,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, switch (class) { case ATA_DEV_SEMB: class = ATA_DEV_ATA; /* some hard drives report SEMB sig */ - /* fall through */ + fallthrough; case ATA_DEV_ATA: case ATA_DEV_ZAC: tf.command = ATA_CMD_ID_ATA; @@ -2907,7 +2907,7 @@ 
int ata_bus_probe(struct ata_port *ap) case -ENODEV: /* give it just one more chance */ tries[dev->devno] = min(tries[dev->devno], 1); - /* fall through */ + fallthrough; case -EIO: if (tries[dev->devno] == 1) { /* This is the last chance, better to slow @@ -3158,7 +3158,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) case ATA_DNXFER_FORCE_PIO0: pio_mask &= 1; - /* fall through */ + fallthrough; case ATA_DNXFER_FORCE_PIO: mwdma_mask = 0; udma_mask = 0; @@ -3868,9 +3868,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, - /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on - SD7SN6S256G and SD8SN8U256G */ - { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, + /* Sandisk SD7/8/9s lock up hard on large trims */ + { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, }, /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, @@ -4694,7 +4693,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc) qc->tf.feature != SETFEATURES_RA_ON && qc->tf.feature != SETFEATURES_RA_OFF) break; - /* fall through */ + fallthrough; case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ case ATA_CMD_SET_MULTI: /* multi_count changed */ /* revalidate device */ diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 474c6c34fe022dc92f5d5f4e8c7e864bc69cc2f6..d912eaa65c943ebd9c4e325535cc356878b8c655 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1576,7 +1576,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, case ATA_DEV_ZAC: if (stat & ATA_SENSE) ata_eh_request_sense(qc, qc->scsicmd); - /* fall through */ + fallthrough; case ATA_DEV_ATA: if (err & ATA_ICRC) qc->err_mask |= AC_ERR_ATA_BUS; @@ -3473,11 +3473,11 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) case -ENODEV: /* device missing or wrong IDENTIFY data, schedule probing */ ehc->i.probe_mask |= (1 << dev->devno); - /* fall through */ + fallthrough; case -EINVAL: /* give it just one more chance */ ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); - /* fall through */ + fallthrough; case -EIO: if (ehc->tries[dev->devno] == 1) { /* This is the last chance, better to slow diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index ec233208585bd4ad58a4feab9b7d3fb442f07f1c..70431912dc6352d53b630e1aaaa39bd212f51ade 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2080,6 +2080,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) { + struct ata_device *dev = args->dev; u16 min_io_sectors; rbuf[1] = 0xb0; @@ -2105,7 +2106,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) * with the unmap bit set. 
*/ if (ata_id_has_trim(args->id)) { - put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]); + u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM; + + if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) + max_blocks = 128 << (20 - SECTOR_SHIFT); + + put_unaligned_be64(max_blocks, &rbuf[36]); put_unaligned_be32(1, &rbuf[28]); } @@ -4162,7 +4168,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6); break; } - /* Fallthrough */ + fallthrough; default: ata_scsi_set_invalid_field(dev, cmd, 2, 0xff); break; @@ -4198,7 +4204,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) * turning this into a no-op. */ case SYNCHRONIZE_CACHE: - /* fall through */ + fallthrough; /* no-op's, complete with success */ case REZERO_UNIT: diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c index e01a3a6e4d462a3ed1089ad6ff52cea21252d5ab..2bc5fc81efe30c8fc45c53bd67185a171775dc07 100644 --- a/drivers/ata/pata_atp867x.c +++ b/drivers/ata/pata_atp867x.c @@ -157,7 +157,7 @@ static int atp867x_get_active_clocks_shifted(struct ata_port *ap, default: printk(KERN_WARNING "ATP867X: active %dclk is invalid. " "Using 12clk.\n", clk); - /* fall through */ + fallthrough; case 9 ... 12: clocks = 7; /* 12 clk */ break; @@ -190,7 +190,7 @@ static int atp867x_get_recover_clocks_shifted(unsigned int clk) default: printk(KERN_WARNING "ATP867X: recover %dclk is invalid. " "Using default 12clk.\n", clk); - /* fall through */ + fallthrough; case 12: /* default 12 clk */ clocks = 0; break; diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 916bf024d737542e9770c833113b9a4d853215bb..7511e11eef4d6af0799914b0bf654e7c38f4ee05 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c @@ -369,7 +369,7 @@ static int serverworks_fixup(struct pci_dev *pdev) break; case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE: ata_pci_bmdma_clear_simplex(pdev); - /* fall through */ + fallthrough; case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE: case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2: rc = serverworks_fixup_csb(pdev); diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index d7228f8e9297c001e730c7f62c8901ebad20715f..664ef658a955f93ba4743c469a097cbb7eb3ddf3 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -2010,7 +2010,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) break; case ATA_CMD_WRITE_MULTI_FUA_EXT: tf->flags &= ~ATA_TFLAG_FUA; /* ugh */ - /* fall through */ + fallthrough; case ATA_CMD_WRITE_MULTI_EXT: tf->command = ATA_CMD_PIO_WRITE_EXT; break; @@ -2044,7 +2044,7 @@ static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc) case ATA_PROT_DMA: if (tf->command == ATA_CMD_DSM) return AC_ERR_OK; - /* fall-thru */ + fallthrough; case ATA_PROT_NCQ: break; /* continue below */ case ATA_PROT_PIO: @@ -2296,7 +2296,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) switch (qc->tf.protocol) { case ATAPI_PROT_PIO: pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; - /* fall through */ + fallthrough; case ATAPI_PROT_NODATA: ap->hsm_task_state = HSM_ST_FIRST; break; @@ -2347,7 +2347,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) return AC_ERR_OTHER; break; /* use bmdma for this */ } - /* fall thru */ + fallthrough; case ATA_PROT_NCQ: mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; @@ -2376,7 +2376,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) ": attempting PIO w/multiple DRQ: " "this 
may fail due to h/w errata\n"); } - /* fall through */ + fallthrough; case ATA_PROT_NODATA: case ATAPI_PROT_PIO: case ATAPI_PROT_NODATA: @@ -3864,7 +3864,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) " and avoid the final two gigabytes on" " all RocketRAID BIOS initialized drives.\n"); } - /* fall through */ + fallthrough; case chip_6042: hpriv->ops = &mv6xxx_ops; hp_flags |= MV_HP_GEN_IIE; diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index 8729f78cef5fdb1243508b0de7b286abd9420968..7815da8ef9e58917305622168813693be7b30256 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -637,7 +637,7 @@ static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc) switch (qc->tf.protocol) { case ATA_PROT_DMA: pdc_fill_sg(qc); - /*FALLTHROUGH*/ + fallthrough; case ATA_PROT_NODATA: i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma, qc->dev->devno, pp->pkt); @@ -652,7 +652,7 @@ static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc) break; case ATAPI_PROT_DMA: pdc_fill_sg(qc); - /*FALLTHROUGH*/ + fallthrough; case ATAPI_PROT_NODATA: pdc_atapi_pkt(qc); break; @@ -1022,11 +1022,11 @@ static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc) case ATAPI_PROT_NODATA: if (qc->dev->flags & ATA_DFLAG_CDB_INTR) break; - /*FALLTHROUGH*/ + fallthrough; case ATA_PROT_NODATA: if (qc->tf.flags & ATA_TFLAG_POLLING) break; - /*FALLTHROUGH*/ + fallthrough; case ATAPI_PROT_DMA: case ATA_PROT_DMA: pdc_packet_start(qc); diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index 2c7b30c5ea3dd8a7f90fa10405957212e37c0e0f..4c01190a5e370e1e86793feae8f8f9dbef3b22e7 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -669,7 +669,7 @@ static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc) case ATA_PROT_NODATA: if (qc->tf.flags & ATA_TFLAG_POLLING) break; - /*FALLTHROUGH*/ + fallthrough; case ATA_PROT_DMA: pdc20621_packet_start(qc); return 0; diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 2ca9ec8027342996ded892562f737b3a1864e21f..0ddd611b427766f4f4d01eb17334a1e7ff13124b 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -711,7 +711,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q) switch (STATUS_CODE (qe)) { case 0x01: /* This is for AAL0 where we put the chip in streaming mode */ - /* Fall through */ + fallthrough; case 0x02: /* Process a real txdone entry. 
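For the ATA_HORKAGE_MAX_TRIM_128M cap in the libata hunks above, the limit that ata_scsiop_inq_b0() reports works out as follows; a standalone check, assuming the kernel's SECTOR_SHIFT of 9 (512-byte logical sectors):

	#include <assert.h>

	#define SECTOR_SHIFT 9	/* 512-byte logical sectors */

	int main(void)
	{
		/* 128 << (20 - SECTOR_SHIFT): 128 MiB expressed in sectors. */
		unsigned long long max_blocks = 128ULL << (20 - SECTOR_SHIFT);

		assert(max_blocks == 262144);				/* sectors */
		assert((max_blocks << SECTOR_SHIFT) == (128ULL << 20));	/* 128 MiB */
		return 0;
	}

So affected SanDisk drives are limited to 262144-sector TRIM payloads instead of the default 65535 * ATA_MAX_TRIM_RNUM advertised for unaffected devices.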
*/ tmp = qe->p0; @@ -998,6 +998,7 @@ static int fs_open(struct atm_vcc *atm_vcc) error = make_rate (pcr, r, &tmc0, NULL); if (error) { kfree(tc); + kfree(vcc); return error; } } diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index a81bc49c14acc6774dd04162ec9d06782476adaa..9a70bee841251d881ea43cebdf75e1a1e6b7910d 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -376,33 +376,33 @@ fore200e_shutdown(struct fore200e* fore200e) case FORE200E_STATE_COMPLETE: kfree(fore200e->stats); - /* fall through */ + fallthrough; case FORE200E_STATE_IRQ: free_irq(fore200e->irq, fore200e->atm_dev); - /* fall through */ + fallthrough; case FORE200E_STATE_ALLOC_BUF: fore200e_free_rx_buf(fore200e); - /* fall through */ + fallthrough; case FORE200E_STATE_INIT_BSQ: fore200e_uninit_bs_queue(fore200e); - /* fall through */ + fallthrough; case FORE200E_STATE_INIT_RXQ: fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status); fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd); - /* fall through */ + fallthrough; case FORE200E_STATE_INIT_TXQ: fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status); fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd); - /* fall through */ + fallthrough; case FORE200E_STATE_INIT_CMDQ: fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status); - /* fall through */ + fallthrough; case FORE200E_STATE_INITIALIZE: /* nothing to do for that state */ @@ -415,7 +415,7 @@ fore200e_shutdown(struct fore200e* fore200e) case FORE200E_STATE_MAP: fore200e->bus->unmap(fore200e); - /* fall through */ + fallthrough; case FORE200E_STATE_CONFIGURE: /* nothing to do for that state */ diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 8af793f5e811322af3d933b911419c49211e1da3..17f44abc9418ead4d9cc9c6ffb6e26060724d506 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -1944,14 +1944,14 @@ he_tasklet(unsigned long data) switch (type) { case ITYPE_RBRQ_THRESH: HPRINTK("rbrq%d threshold\n", group); - /* fall through */ + fallthrough; case ITYPE_RBRQ_TIMER: if (he_service_rbrq(he_dev, group)) he_service_rbpl(he_dev, group); break; case ITYPE_TBRQ_THRESH: HPRINTK("tbrq%d threshold\n", group); - /* fall through */ + fallthrough; case ITYPE_TPD_COMPLETE: he_service_tbrq(he_dev, group); break; diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c index 63871859e6e8ed85d56a06f81ee67c96906275ed..3c081b6171a8fc39ffd3ac2535533861ffc14d9f 100644 --- a/drivers/atm/idt77105.c +++ b/drivers/atm/idt77105.c @@ -192,7 +192,7 @@ static int idt77105_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) switch (cmd) { case IDT77105_GETSTATZ: if (!capable(CAP_NET_ADMIN)) return -EPERM; - /* fall through */ + fallthrough; case IDT77105_GETSTAT: return fetch_stats(dev, arg, cmd == IDT77105_GETSTATZ); case ATM_SETLOOP: diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 986c1313694c66f3b79b7a853d4fd5b600942c5a..ac811cfa6843108644c94d0fddb565be3de260c5 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -2019,7 +2019,7 @@ static int lanai_normalize_ci(struct lanai_dev *lanai, switch (*vpip) { case ATM_VPI_ANY: *vpip = 0; - /* FALLTHROUGH */ + fallthrough; case 0: break; default: diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index ee059c77e3bbc717e45d88b96974f798fbbbacb5..cf5fffcf98a1e6db5d25b300d4938733c9088fc6 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1447,7 +1447,7 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) switch (cmd) { case ZATM_GETPOOLZ: if (!capable(CAP_NET_ADMIN)) 
return -EPERM; - /* fall through */ + fallthrough; case ZATM_GETPOOL: { struct zatm_pool_info info; diff --git a/drivers/auxdisplay/arm-charlcd.c b/drivers/auxdisplay/arm-charlcd.c index dea031484cc4874a4ccad0f47b45ff5208a05088..0b1c99cca7334ff14b497f9c95709d1bacff0b9f 100644 --- a/drivers/auxdisplay/arm-charlcd.c +++ b/drivers/auxdisplay/arm-charlcd.c @@ -2,7 +2,7 @@ /* * Driver for the on-board character LCD found on some ARM reference boards * This is basically an Hitachi HD44780 LCD with a custom IP block to drive it - * http://en.wikipedia.org/wiki/HD44780_Character_LCD + * https://en.wikipedia.org/wiki/HD44780_Character_LCD * Currently it will just display the text "ARM Linux" and the linux version * * Author: Linus Walleij diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index 99980aa3644bc3ffd4e7cf75cc307ad7e13df23e..1c82d824ae0078ea57d1cf3810f04bfd4e682dc0 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -1365,7 +1365,7 @@ static void panel_process_inputs(void) break; input->rise_timer = 0; input->state = INPUT_ST_RISING; - /* fall through */ + fallthrough; case INPUT_ST_RISING: if ((phys_curr & input->mask) != input->value) { input->state = INPUT_ST_LOW; @@ -1378,11 +1378,11 @@ static void panel_process_inputs(void) } input->high_timer = 0; input->state = INPUT_ST_HIGH; - /* fall through */ + fallthrough; case INPUT_ST_HIGH: if (input_state_high(input)) break; - /* fall through */ + fallthrough; case INPUT_ST_FALLING: input_state_falling(input); } diff --git a/drivers/base/core.c b/drivers/base/core.c index ac1046a382bc027153be5148424ffde43c3b320e..bb5806a2bd4ca7cc1920096104a7eb2fe8b5d8e2 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -807,9 +807,7 @@ static void device_link_put_kref(struct device_link *link) void device_link_del(struct device_link *link) { device_links_write_lock(); - device_pm_lock(); device_link_put_kref(link); - device_pm_unlock(); device_links_write_unlock(); } EXPORT_SYMBOL_GPL(device_link_del); @@ -830,7 +828,6 @@ void device_link_remove(void *consumer, struct device *supplier) return; device_links_write_lock(); - device_pm_lock(); list_for_each_entry(link, &supplier->links.consumers, s_node) { if (link->consumer == consumer) { @@ -839,7 +836,6 @@ void device_link_remove(void *consumer, struct device *supplier) } } - device_pm_unlock(); device_links_write_unlock(); } EXPORT_SYMBOL_GPL(device_link_remove); @@ -4237,10 +4233,10 @@ int dev_err_probe(const struct device *dev, int err, const char *fmt, ...) vaf.va = &args; if (err != -EPROBE_DEFER) { - dev_err(dev, "error %d: %pV", err, &vaf); + dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf); } else { device_set_deferred_probe_reason(dev, &vaf); - dev_dbg(dev, "error %d: %pV", err, &vaf); + dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf); } va_end(args); @@ -4264,9 +4260,9 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) */ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) { - if (fwnode) { - struct fwnode_handle *fn = dev->fwnode; + struct fwnode_handle *fn = dev->fwnode; + if (fwnode) { if (fwnode_is_primary(fn)) fn = fn->secondary; @@ -4276,8 +4272,12 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) } dev->fwnode = fwnode; } else { - dev->fwnode = fwnode_is_primary(dev->fwnode) ? 
- dev->fwnode->secondary : NULL; + if (fwnode_is_primary(fn)) { + dev->fwnode = fn->secondary; + fn->secondary = NULL; + } else { + dev->fwnode = NULL; + } } } EXPORT_SYMBOL_GPL(set_primary_fwnode); diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c index 5327bfc6ba719c333af665ff526cc5dbd9ae3f68..283ca2de76d45ea26a772c4b9330a61580ba269b 100644 --- a/drivers/base/firmware_loader/fallback.c +++ b/drivers/base/firmware_loader/fallback.c @@ -289,10 +289,10 @@ static ssize_t firmware_loading_store(struct device *dev, } break; } - /* fallthrough */ + fallthrough; default: dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); - /* fallthrough */ + fallthrough; case -1: fw_load_abort(fw_sysfs); break; diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h index 933e2192fbe8a13a7ed72f9c83587a85c8d8f984..d08efc77cf16a0f3d75ddc95ee6f92558e2ad2a2 100644 --- a/drivers/base/firmware_loader/firmware.h +++ b/drivers/base/firmware_loader/firmware.h @@ -142,10 +142,12 @@ int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags); void fw_free_paged_buf(struct fw_priv *fw_priv); int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed); int fw_map_paged_buf(struct fw_priv *fw_priv); +bool fw_is_paged_buf(struct fw_priv *fw_priv); #else static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {} static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; } static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; } +static inline bool fw_is_paged_buf(struct fw_priv *fw_priv) { return false; } #endif #endif /* __FIRMWARE_LOADER_H */ diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 9da0c9d5f538ea2ce87f1dc7d52bf05a4fce92b6..63b9714a0154855c9eacd1f3e3e5982a2a179e64 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -252,9 +252,11 @@ static void __free_fw_priv(struct kref *ref) list_del(&fw_priv->list); spin_unlock(&fwc->lock); - fw_free_paged_buf(fw_priv); /* free leftover pages */ - if (!fw_priv->allocated_size) + if (fw_is_paged_buf(fw_priv)) + fw_free_paged_buf(fw_priv); + else if (!fw_priv->allocated_size) vfree(fw_priv->data); + kfree_const(fw_priv->fw_name); kfree(fw_priv); } @@ -268,6 +270,11 @@ static void free_fw_priv(struct fw_priv *fw_priv) } #ifdef CONFIG_FW_LOADER_PAGED_BUF +bool fw_is_paged_buf(struct fw_priv *fw_priv) +{ + return fw_priv->is_paged_buf; +} + void fw_free_paged_buf(struct fw_priv *fw_priv) { int i; @@ -275,6 +282,8 @@ void fw_free_paged_buf(struct fw_priv *fw_priv) if (!fw_priv->pages) return; + vunmap(fw_priv->data); + for (i = 0; i < fw_priv->nr_pages; i++) __free_page(fw_priv->pages[i]); kvfree(fw_priv->pages); @@ -328,10 +337,6 @@ int fw_map_paged_buf(struct fw_priv *fw_priv) if (!fw_priv->data) return -ENOMEM; - /* page table is no longer needed after mapping, let's free */ - kvfree(fw_priv->pages); - fw_priv->pages = NULL; - return 0; } #endif diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 9dd85bea40260a951a805a6c44100c7b6e500149..205a06752ca90c4bfe601477cc203c5dc6007302 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1606,13 +1606,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) } /* - * If a device configured to wake up the system from sleep states - * has been suspended at run time and there's a resume request pending - * for it, 
this is equivalent to the device signaling wakeup, so the - * system suspend operation should be aborted. + * Wait for possible runtime PM transitions of the device in progress + * to complete and if there's a runtime resume request pending for it, + * resume it before proceeding with invoking the system-wide suspend + * callbacks for it. + * + * If the system-wide suspend callbacks below change the configuration + * of the device, they must disable runtime PM for it or otherwise + * ensure that its runtime-resume callbacks will not be confused by that + * change in case they are invoked going forward. */ - if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) - pm_wakeup_event(dev, 0); + pm_runtime_barrier(dev); if (pm_wakeup_pending()) { dev->power.direct_complete = false; diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 3cf9bc5d8d9599abd13600af41f9f59d3c682ca8..6dba4139515517d5012f47591e1b740d15f44a86 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -1135,7 +1135,7 @@ noskb: if (buf) break; } bvcpy(skb, f->buf->bio, f->iter, n); - /* fall through */ + fallthrough; case ATA_CMD_PIO_WRITE: case ATA_CMD_PIO_WRITE_EXT: spin_lock_irq(&d->lock); diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 1553d41f0b9161dc632b409802e4e7a962934c60..a50e13af030526ddcd8bb7a2a05a697b3791d537 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1726,7 +1726,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, /* MSch: invalidate default_params */ default_params[drive].blocks = 0; set_capacity(floppy->disk, MAX_DISK_SIZE * 2); - /* Fall through */ + fallthrough; case FDFMTEND: case FDFLUSH: /* invalidate the buffer track to force a reread */ diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index fe6cb99eb917644d1c4b963eae79bb6b53a6bda1..740e93bad21feb65e8d2a077e62e30fe5aba2f90 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1733,7 +1733,7 @@ static inline void __drbd_chk_io_error_(struct drbd_device *device, _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL); break; } - /* fall through - for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */ + fallthrough; /* for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */ case EP_DETACH: case EP_CALL_HELPER: /* Remember whether we saw a READ or WRITE error. 
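The dominant change across these hunks is mechanical: comment-style "/* fall through */" markers are replaced by the fallthrough pseudo-keyword, which the compiler itself can verify. A minimal sketch of the mechanism, based on the include/linux/compiler_attributes.h definition (exact spelling varies slightly between kernel versions and compilers):

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif

/*
 * With -Wimplicit-fallthrough enabled, silently dropping from one case
 * into the next draws a warning, while the annotation marks the drop as
 * deliberate. The state names here are hypothetical, not from this patch.
 */
switch (state) {
case STATE_PREPARE:
	prepare();
	fallthrough;	/* deliberate: prepare, then run */
case STATE_RUN:
	run();
	break;
}

Unlike the comment form, an annotation that does not immediately precede a case label is itself flagged by the compiler, which is what makes the conversion more than cosmetic.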
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index cb687ccdbd96f04dc2e84d20a5f9317f0de2ed04..04b6bde9419d2a49d74d7d9d65442c72dffafcdc 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -430,7 +430,7 @@ int drbd_thread_start(struct drbd_thread *thi) thi->t_state = RESTARTING; drbd_info(resource, "Restarting %s thread (from %s [%d])\n", thi->name, current->comm, current->pid); - /* fall through */ + fallthrough; case RUNNING: case RESTARTING: default: diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 28eb078f8b754d669e39129e8d7bb8e4c55f5b48..43c8ae4d9fca812bb770d89ba967a378778ea950 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -3883,7 +3883,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device, if (nla_put_u32(skb, T_helper_exit_code, sib->helper_exit_code)) goto nla_put_failure; - /* fall through */ + fallthrough; case SIB_HELPER_PRE: if (nla_put_string(skb, T_helper, sib->helper_name)) goto nla_put_failure; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 1d17593f5d2bb7a4059eb91b4b425b6ca8391c58..422363daa6180b54a35ce1ee15d36ca1cc0afa80 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1797,7 +1797,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf break; else drbd_warn(connection, "Allocation of an epoch failed, slowing down\n"); - /* Fall through */ + fallthrough; case WO_BDEV_FLUSH: case WO_DRAIN_IO: @@ -2917,7 +2917,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet then we would do something smarter here than reading the block... */ peer_req->flags |= EE_RS_THIN_REQ; - /* fall through */ + fallthrough; case P_RS_DATA_REQUEST: peer_req->w.cb = w_e_end_rsdata_req; fault_type = DRBD_FAULT_RS_RD; @@ -3083,7 +3083,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold rv = 1; break; } - /* Else fall through - to one of the other strategies... */ + fallthrough; /* to one of the other strategies */ case ASB_DISCARD_OLDER_PRI: if (self == 0 && peer == 1) { rv = 1; @@ -3096,7 +3096,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold /* Else fall through to one of the other strategies... 
*/ drbd_warn(device, "Discard younger/older primary did not find a decision\n" "Using discard-least-changes instead\n"); - /* fall through */ + fallthrough; case ASB_DISCARD_ZERO_CHG: if (ch_peer == 0 && ch_self == 0) { rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) @@ -3108,7 +3108,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold } if (after_sb_0p == ASB_DISCARD_ZERO_CHG) break; - /* else, fall through */ + fallthrough; case ASB_DISCARD_LEAST_CHG: if (ch_self < ch_peer) rv = -1; @@ -3608,7 +3608,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device, switch (rr_conflict) { case ASB_CALL_HELPER: drbd_khelper(device, "pri-lost"); - /* fall through */ + fallthrough; case ASB_DISCONNECT: drbd_err(device, "I shall become SyncTarget, but I am primary!\n"); return C_MASK; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 674be09b2da94af5534212f1ecde6a33898ff14e..5c975af9c15fb89d7b78ec16dfee8f6bad962611 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -611,7 +611,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, drbd_set_out_of_sync(device, req->i.sector, req->i.size); drbd_report_io_error(device, req); __drbd_chk_io_error(device, DRBD_READ_ERROR); - /* fall through. */ + fallthrough; case READ_AHEAD_COMPLETED_WITH_ERROR: /* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */ mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); @@ -836,7 +836,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, } /* else: FIXME can this happen? */ break; } - /* else, fall through - to BARRIER_ACKED */ + fallthrough; /* to BARRIER_ACKED */ case BARRIER_ACKED: /* barrier ack for READ requests does not make sense */ diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 09079aee8dc43056ab4780339f1e87697c76dc13..a563b023458a8b2e5a931fd822dc4b7c7b29e2ad 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -1680,7 +1680,7 @@ static void recal_interrupt(void) clear_bit(FD_DISK_NEWCHANGE_BIT, &drive_state[current_drive].flags); drive_state[current_drive].select_date = jiffies; - /* fall through */ + fallthrough; default: debugt(__func__, "default"); /* Recalibrate moves the head by at @@ -3592,7 +3592,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) return -EINTR; process_fd_request(); - /* fall through */ + fallthrough; case FDGETDRVSTAT: outparam = &drive_state[drive]; break; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 2f137d6ce169d581a144870744d39a3faeafd089..d3394191e16825267b222fff6e03f979bbf12f31 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -878,6 +878,7 @@ static void loop_config_discard(struct loop_device *lo) struct file *file = lo->lo_backing_file; struct inode *inode = file->f_mapping->host; struct request_queue *q = lo->lo_queue; + u32 granularity, max_discard_sectors; /* * If the backing device is a block device, mirror its zeroing @@ -890,11 +891,10 @@ static void loop_config_discard(struct loop_device *lo) struct request_queue *backingq; backingq = bdev_get_queue(inode->i_bdev); - blk_queue_max_discard_sectors(q, - backingq->limits.max_write_zeroes_sectors); - blk_queue_max_write_zeroes_sectors(q, - backingq->limits.max_write_zeroes_sectors); + max_discard_sectors = backingq->limits.max_write_zeroes_sectors; + granularity = 
backingq->limits.discard_granularity ?: + queue_physical_block_size(backingq); /* * We use punch hole to reclaim the free space used by the @@ -903,23 +903,26 @@ static void loop_config_discard(struct loop_device *lo) * useful information. */ } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) { - q->limits.discard_granularity = 0; - q->limits.discard_alignment = 0; - blk_queue_max_discard_sectors(q, 0); - blk_queue_max_write_zeroes_sectors(q, 0); + max_discard_sectors = 0; + granularity = 0; } else { - q->limits.discard_granularity = inode->i_sb->s_blocksize; - q->limits.discard_alignment = 0; - - blk_queue_max_discard_sectors(q, UINT_MAX >> 9); - blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); + max_discard_sectors = UINT_MAX >> 9; + granularity = inode->i_sb->s_blocksize; } - if (q->limits.max_write_zeroes_sectors) + if (max_discard_sectors) { + q->limits.discard_granularity = granularity; + blk_queue_max_discard_sectors(q, max_discard_sectors); + blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); - else + } else { + q->limits.discard_granularity = 0; + blk_queue_max_discard_sectors(q, 0); + blk_queue_max_write_zeroes_sectors(q, 0); blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); + } + q->limits.discard_alignment = 0; } static void loop_unprepare_queue(struct loop_device *lo) @@ -1111,8 +1114,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, mapping = file->f_mapping; inode = mapping->host; - size = get_loop_size(lo, file); - if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) { error = -EINVAL; goto out_unlock; @@ -1162,6 +1163,8 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, loop_update_rotational(lo); loop_update_dio(lo); loop_sysfs_init(lo); + + size = get_loop_size(lo, file); loop_set_size(lo, size); set_blocksize(bdev, S_ISBLK(inode->i_mode) ? 
@@ -1719,7 +1722,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, case LOOP_SET_BLOCK_SIZE: if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN)) return -EPERM; - /* Fall through */ + fallthrough; default: err = lo_simple_ioctl(lo, cmd, arg); break; @@ -1867,7 +1870,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, case LOOP_SET_STATUS64: case LOOP_CONFIGURE: arg = (unsigned long) compat_ptr(arg); - /* fall through */ + fallthrough; case LOOP_SET_FD: case LOOP_CHANGE_FD: case LOOP_SET_BLOCK_SIZE: diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 3ff4054d6834d25f2b8afae4e99d5e2db2994f4e..edf8b632e3d27b4b907a52dd55e41e3a6b8a03ef 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1363,6 +1363,8 @@ static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout) nbd->tag_set.timeout = timeout * HZ; if (timeout) blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); + else + blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ); } /* Must be called with config_lock held */ diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 47a9dad880af2aac70567889f13dc42680be9a0c..d74443a9c8fa233d8fa7870b4d326e7595ac747c 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1147,7 +1147,7 @@ static int null_handle_rq(struct nullb_cmd *cmd) len = bvec.bv_len; err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, op_is_write(req_op(rq)), sector, - req_op(rq) & REQ_FUA); + rq->cmd_flags & REQ_FUA); if (err) { spin_unlock_irq(&nullb->lock); return err; diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index c0967507d085e4e3cbc188a79e48db52470db1d0..a7af4f27b7c3f11eb3205a6d0bebcd2661bad7d5 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -440,7 +440,7 @@ static void run_fsm(void) pd_claimed = 1; if (!pi_schedule_claimed(pi_current, run_fsm)) return; - /* fall through */ + fallthrough; case 1: pd_claimed = 2; pi_current->proto->connect(pi_current); @@ -465,7 +465,7 @@ static void run_fsm(void) if (stop) return; } - /* fall through */ + fallthrough; case Hold: schedule_fsm(); return; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 4becc1efe775fc761f44c2762878a34122d53e79..1034e445680c5621e6e6bf0a5183fcfc81b658e6 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2641,7 +2641,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, */ if (pd->refcnt == 1) pkt_lock_door(pd, 0); - /* fall through */ + fallthrough; /* * forward selected CDROM ioctls to CD-ROM, for UDF */ diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d9c0e7d154f9f116b15f2065524eab4fa2e059aa..e77eaab5cf23ed1192be0b9bbebb32fb238a136d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3293,7 +3293,7 @@ static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result) case __RBD_OBJ_COPYUP_OBJECT_MAPS: if (!pending_result_dec(&obj_req->pending, result)) return false; - /* fall through */ + fallthrough; case RBD_OBJ_COPYUP_OBJECT_MAPS: if (*result) { rbd_warn(rbd_dev, "snap object map update failed: %d", @@ -3312,7 +3312,7 @@ static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result) case __RBD_OBJ_COPYUP_WRITE_OBJECT: if (!pending_result_dec(&obj_req->pending, result)) return false; - /* fall through */ + fallthrough; case RBD_OBJ_COPYUP_WRITE_OBJECT: return true; default: @@ -3399,7 +3399,7 @@ static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int 
*result) case __RBD_OBJ_WRITE_COPYUP: if (!rbd_obj_advance_copyup(obj_req, result)) return false; - /* fall through */ + fallthrough; case RBD_OBJ_WRITE_COPYUP: if (*result) { rbd_warn(rbd_dev, "copyup failed: %d", *result); @@ -3592,7 +3592,7 @@ static bool rbd_img_advance(struct rbd_img_request *img_req, int *result) case __RBD_IMG_OBJECT_REQUESTS: if (!pending_result_dec(&img_req->pending, result)) return false; - /* fall through */ + fallthrough; case RBD_IMG_OBJECT_REQUESTS: return true; default: @@ -5120,6 +5120,9 @@ static ssize_t rbd_config_info_show(struct device *dev, { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return sprintf(buf, "%s\n", rbd_dev->config_info); } @@ -5231,6 +5234,9 @@ static ssize_t rbd_image_refresh(struct device *dev, struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + ret = rbd_dev_refresh(rbd_dev); if (ret) return ret; @@ -7059,6 +7065,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, struct rbd_client *rbdc; int rc; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (!try_module_get(THIS_MODULE)) return -ENODEV; @@ -7209,6 +7218,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus, bool force = false; int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + dev_id = -1; opt_buf[0] = '\0'; sscanf(buf, "%d %5s", &dev_id, opt_buf); diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c index 0fb94843a495f96566ce159639e5d8faf1cbf7ca..e1bc8b4cd5929000b97a4b2172df285e649c0485 100644 --- a/drivers/block/rnbd/rnbd-srv.c +++ b/drivers/block/rnbd/rnbd-srv.c @@ -148,7 +148,8 @@ static int process_rdma(struct rtrs_srv *sess, /* Generate bio with pages pointing to the rdma buffer */ bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL); if (IS_ERR(bio)) { - rnbd_srv_err(sess_dev, "Failed to generate bio, err: %ld\n", PTR_ERR(bio)); + err = PTR_ERR(bio); + rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", err); goto sess_dev_put; } diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 7e261224ff1065d267b5fe4b6885c03c13462dc2..8799e3bab0677ba4b47d814860a361c89f10d0f3 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -425,7 +425,7 @@ static void card_state_change(struct rsxx_cardinfo *card, * Fall through so the DMA devices can be attached and * the user can attempt to pull off their data. 
*/ - /* fall through */ + fallthrough; case CARD_STATE_GOOD: st = rsxx_get_card_size8(card, &card->size8); if (st) diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 3a476dc1d14f57b5fceee13550bf9f6dacbf5590..ae6454c24594f9eb81b568b33d29951f4a2eaba6 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -1436,7 +1436,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev, blk_mq_requeue_request(req, true); break; } - /* fall through */ + fallthrough; case SKD_CHECK_STATUS_REPORT_ERROR: default: diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 63b213e00b37525767a70e4e9f6b2c33324941a3..b2e48dac1ebdb88308d59a27cefdf2df853c3dfd 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -126,16 +126,31 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap) if (!range) return -ENOMEM; - __rq_for_each_bio(bio, req) { - u64 sector = bio->bi_iter.bi_sector; - u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT; - - range[n].flags = cpu_to_le32(flags); - range[n].num_sectors = cpu_to_le32(num_sectors); - range[n].sector = cpu_to_le64(sector); - n++; + /* + * Single max discard segment means multi-range discard isn't + * supported, and block layer only runs contiguity merge like + * normal RW request. So we can't rely on bio for retrieving + * each range info. + */ + if (queue_max_discard_segments(req->q) == 1) { + range[0].flags = cpu_to_le32(flags); + range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req)); + range[0].sector = cpu_to_le64(blk_rq_pos(req)); + n = 1; + } else { + __rq_for_each_bio(bio, req) { + u64 sector = bio->bi_iter.bi_sector; + u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT; + + range[n].flags = cpu_to_le32(flags); + range[n].num_sectors = cpu_to_le32(num_sectors); + range[n].sector = cpu_to_le64(sector); + n++; + } } + WARN_ON_ONCE(n != segments); + req->special_vec.bv_page = virt_to_page(range); req->special_vec.bv_offset = offset_in_page(range); req->special_vec.bv_len = sizeof(*range) * segments; diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index c2f71265af4b57fb7341fccc510b76211a609d35..adfc9352351dfbc4d48794b17d9d4ff3e886efd3 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1260,7 +1260,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, break; case BLKIF_OP_WRITE_BARRIER: drain = true; - /* fall through */ + fallthrough; case BLKIF_OP_FLUSH_DISKCACHE: ring->st_f_req++; operation = REQ_OP_WRITE; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 42944d41aea0385440b0bf67d3762ad4f904ba28..b9aa5d1ac10b7ffcf0809501500719c2610f25f6 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -843,7 +843,7 @@ static void frontend_changed(struct xenbus_device *dev, xenbus_switch_state(dev, XenbusStateClosed); if (xenbus_dev_is_online(dev)) break; - /* fall through */ + fallthrough; /* if not online */ case XenbusStateUnknown: /* implies xen_blkif_disconnect() via xen_blkbk_remove() */ diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 3bb3dd8da9b0c101ecedef262966e5eb56f51549..91de2e0755aec49fb4a12b3c76eb1a60a3710ad5 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1403,7 +1403,6 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp) case BLKIF_RSP_EOPNOTSUPP: return REQ_EOPNOTSUPP; case BLKIF_RSP_ERROR: - /*
Fallthrough. */ default: return REQ_ERROR; } @@ -1643,7 +1642,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) info->feature_flush = 0; xlvbd_flush(info); } - /* fall through */ + fallthrough; case BLKIF_OP_READ: case BLKIF_OP_WRITE: if (unlikely(bret->status != BLKIF_RSP_OKAY)) @@ -2484,7 +2483,7 @@ static void blkback_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* fall through */ + fallthrough; case XenbusStateClosing: if (info) blkfront_closing(info); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index fb5a901fd89e57bb4f69b71c3c715bd418ef2eb2..efb088df1276689bb59136fbd0816a3c74903eef 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1849,7 +1849,7 @@ static int sysc_clockdomain_init(struct sysc *ddata) switch (ddata->nr_clocks) { case 2: ick = ddata->clocks[SYSC_ICK]; - /* fallthrough */ + fallthrough; case 1: fck = ddata->clocks[SYSC_FCK]; break; diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c index 89527bae4602a906bde52ee056bb9acb0306c01a..760d9a9312898bb879fb5222a0dea24e63b7b43d 100644 --- a/drivers/char/agp/ali-agp.c +++ b/drivers/char/agp/ali-agp.c @@ -357,7 +357,7 @@ static int agp_ali_probe(struct pci_dev *pdev, const struct pci_device_id *ent) default: break; } - /*FALLTHROUGH*/ + fallthrough; default: bridge->driver = &ali_generic_bridge; } diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c index d704cef64b64a8c33a0a13b3c5b59be79bd21ce5..055cfe59f519dbc6c75154eadb9f06e529b86406 100644 --- a/drivers/char/hw_random/ingenic-rng.c +++ b/drivers/char/hw_random/ingenic-rng.c @@ -92,8 +92,7 @@ static int ingenic_rng_probe(struct platform_device *pdev) priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { pr_err("%s: Failed to map RNG registers\n", __func__); - ret = PTR_ERR(priv->base); - goto err_free_rng; + return PTR_ERR(priv->base); } priv->version = (enum ingenic_rng_version)of_device_get_match_data(&pdev->dev); @@ -106,17 +105,13 @@ static int ingenic_rng_probe(struct platform_device *pdev) ret = hwrng_register(&priv->rng); if (ret) { dev_err(&pdev->dev, "Failed to register hwrng\n"); - goto err_free_rng; + return ret; } platform_set_drvdata(pdev, priv); dev_info(&pdev->dev, "Ingenic RNG driver registered\n"); return 0; - -err_free_rng: - kfree(priv); - return ret; } static int ingenic_rng_remove(struct platform_device *pdev) diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c index ed4dc3b1843e3a06995750deda672b5bb830444a..f292e74bd4a51585dfa27df0d65aa55ad4993728 100644 --- a/drivers/char/ipmi/kcs_bmc.c +++ b/drivers/char/ipmi/kcs_bmc.c @@ -99,7 +99,7 @@ static void kcs_bmc_handle_data(struct kcs_bmc *kcs_bmc) switch (kcs_bmc->phase) { case KCS_PHASE_WRITE_START: kcs_bmc->phase = KCS_PHASE_WRITE_DATA; - /* fall through */ + fallthrough; case KCS_PHASE_WRITE_DATA: if (kcs_bmc->data_in_idx < KCS_MSG_BUFSIZ) { diff --git a/drivers/char/lp.c b/drivers/char/lp.c index bd95aba1f9fe8eb9768b87a8a7fc602ee5adef8d..45932f05fd678f4e0d5524bca5e19262d21c316b 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -734,7 +734,7 @@ static long lp_ioctl(struct file *file, unsigned int cmd, ret = lp_set_timeout32(minor, (void __user *)arg); break; } - /* fall through - for 64-bit */ + fallthrough; /* for 64-bit */ case LPSETTIMEOUT_NEW: ret = lp_set_timeout64(minor, (void __user *)arg); break; @@ -762,7 +762,7 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd, ret = 
lp_set_timeout32(minor, (void __user *)arg); break; } - /* fall through - for x32 mode */ + fallthrough; /* for x32 mode */ case LPSETTIMEOUT_NEW: ret = lp_set_timeout64(minor, (void __user *)arg); break; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 687d4af6945d365baebefee5b005eebfb17ad2d9..abd4ffdc8cdebc4ade04d4b77875bd894b063863 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -791,7 +791,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) switch (orig) { case SEEK_CUR: offset += file->f_pos; - /* fall through */ + fallthrough; case SEEK_SET: /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */ if ((unsigned long long)offset >= -MAX_ERRNO) { diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 8206412d25ba7ee326c929e86bc0c97850420ec7..e9f694b368719f40e9c26e4029ccb5ba263f604e 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -286,7 +286,7 @@ static long nvram_misc_ioctl(struct file *file, unsigned int cmd, #ifdef CONFIG_PPC case OBSOLETE_PMAC_NVRAM_GET_OFFSET: pr_warn("nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n"); - /* fall through */ + fallthrough; case IOC_NVRAM_GET_OFFSET: ret = -EINVAL; #ifdef CONFIG_PPC_PMAC diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 3576ad7bd380fbbbc9c0334ed2b6d080e4ba841a..68b087bff59cc168e8b5366d89844ccf26f4a628 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -653,9 +653,8 @@ config ATCPIT100_TIMER This option enables support for the Andestech ATCPIT100 timers. config RISCV_TIMER - bool "Timer for the RISC-V platform" + bool "Timer for the RISC-V platform" if COMPILE_TEST depends on GENERIC_SCHED_CLOCK && RISCV - default y select TIMER_PROBE select TIMER_OF help @@ -663,6 +662,15 @@ config RISCV_TIMER is accessed via both the SBI and the rdcycle instruction. This is required for all RISC-V systems. +config CLINT_TIMER + bool "CLINT Timer for the RISC-V platform" if COMPILE_TEST + depends on GENERIC_SCHED_CLOCK && RISCV + select TIMER_PROBE + select TIMER_OF + help + This option enables the CLINT timer for RISC-V systems. The CLINT + driver is usually used for NoMMU RISC-V systems. 
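Both the reworked RISCV_TIMER entry and the new CLINT_TIMER entry select TIMER_OF and TIMER_PROBE, so these drivers hook into the devicetree-driven timer probing that runs early in boot. A rough sketch of that registration pattern follows; the driver name and compatible string are made up for illustration, only the TIMER_OF_DECLARE hook itself is real:

#include <linux/clocksource.h>
#include <linux/of.h>

/*
 * Init callback invoked by the timer-probe core for every devicetree
 * node whose compatible string matches the one registered below.
 */
static int __init demo_timer_init(struct device_node *np)
{
	/* map registers, register a clocksource and clockevents, wire irqs */
	return 0;
}
TIMER_OF_DECLARE(demo_timer, "vendor,demo-timer", demo_timer_init);

The CLINT driver added below uses exactly this hook, keyed to its riscv,clint0 and sifive,clint0 compatible strings.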
+ config CSKY_MP_TIMER bool "SMP Timer for the C-SKY platform" if COMPILE_TEST depends on CSKY diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index eaedb7240ae74f073383ce0a01a869c744706ddf..1c444cc3bb4474016e7d45cc2e1794ea127f8824 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -89,6 +89,7 @@ obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o obj-$(CONFIG_X86_NUMACHIP) += numachip.o obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o +obj-$(CONFIG_CLINT_TIMER) += timer-clint.o obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c index 38858e141731ef855047977c3da6b2a69241a2d0..80e9606020307e02621406483bb6f308e418cc70 100644 --- a/drivers/clocksource/timer-cadence-ttc.c +++ b/drivers/clocksource/timer-cadence-ttc.c @@ -309,7 +309,7 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb, /* restore original register value */ writel_relaxed(ttccs->scale_clk_ctrl_reg_old, ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); - /* fall through */ + fallthrough; default: return NOTIFY_DONE; } @@ -392,7 +392,7 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE); - /* fall through */ + fallthrough; case PRE_RATE_CHANGE: case ABORT_RATE_CHANGE: default: diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c new file mode 100644 index 0000000000000000000000000000000000000000..8eeafa82c03d3df9f9603115b8aed31ece17577b --- /dev/null +++ b/drivers/clocksource/timer-clint.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Western Digital Corporation or its affiliates. + * + * Most of the M-mode (i.e. NoMMU) RISC-V systems usually have a + * CLINT MMIO timer device. 
+ */ + +#define pr_fmt(fmt) "clint: " fmt +#include <linux/bitops.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/sched_clock.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/interrupt.h> +#include <linux/smp.h> + +#define CLINT_IPI_OFF 0 +#define CLINT_TIMER_CMP_OFF 0x4000 +#define CLINT_TIMER_VAL_OFF 0xbff8 + +/* CLINT manages IPI and Timer for RISC-V M-mode */ +static u32 __iomem *clint_ipi_base; +static u64 __iomem *clint_timer_cmp; +static u64 __iomem *clint_timer_val; +static unsigned long clint_timer_freq; +static unsigned int clint_timer_irq; + +static void clint_send_ipi(const struct cpumask *target) +{ + unsigned int cpu; + + for_each_cpu(cpu, target) + writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu)); +} + +static void clint_clear_ipi(void) +{ + writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id())); +} + +static struct riscv_ipi_ops clint_ipi_ops = { + .ipi_inject = clint_send_ipi, + .ipi_clear = clint_clear_ipi, +}; + +#ifdef CONFIG_64BIT +#define clint_get_cycles() readq_relaxed(clint_timer_val) +#else +#define clint_get_cycles() readl_relaxed(clint_timer_val) +#define clint_get_cycles_hi() readl_relaxed(((u32 *)clint_timer_val) + 1) +#endif + +#ifdef CONFIG_64BIT +static u64 notrace clint_get_cycles64(void) +{ + return clint_get_cycles(); +} +#else /* CONFIG_64BIT */ +static u64 notrace clint_get_cycles64(void) +{ + u32 hi, lo; + + do { + hi = clint_get_cycles_hi(); + lo = clint_get_cycles(); + } while (hi != clint_get_cycles_hi()); + + return ((u64)hi << 32) | lo; +} +#endif /* CONFIG_64BIT */ + +static u64 clint_rdtime(struct clocksource *cs) +{ + return clint_get_cycles64(); +} + +static struct clocksource clint_clocksource = { + .name = "clint_clocksource", + .rating = 300, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .read = clint_rdtime, +}; + +static int clint_clock_next_event(unsigned long delta, + struct clock_event_device *ce) +{ + void __iomem *r = clint_timer_cmp + + cpuid_to_hartid_map(smp_processor_id()); + + csr_set(CSR_IE, IE_TIE); + writeq_relaxed(clint_get_cycles64() + delta, r); + return 0; +} + +static DEFINE_PER_CPU(struct clock_event_device, clint_clock_event) = { + .name = "clint_clockevent", + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 100, + .set_next_event = clint_clock_next_event, +}; + +static int clint_timer_starting_cpu(unsigned int cpu) +{ + struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu); + + ce->cpumask = cpumask_of(cpu); + clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff); + + enable_percpu_irq(clint_timer_irq, + irq_get_trigger_type(clint_timer_irq)); + return 0; +} + +static int clint_timer_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(clint_timer_irq); + return 0; +} + +static irqreturn_t clint_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evdev = this_cpu_ptr(&clint_clock_event); + + csr_clear(CSR_IE, IE_TIE); + evdev->event_handler(evdev); + + return IRQ_HANDLED; +} + +static int __init clint_timer_init_dt(struct device_node *np) +{ + int rc; + u32 i, nr_irqs; + void __iomem *base; + struct of_phandle_args oirq; + + /* + * Ensure that CLINT device interrupts are either RV_IRQ_TIMER or + * RV_IRQ_SOFT. If it's anything else then we ignore the device.
+ */ + nr_irqs = of_irq_count(np); + for (i = 0; i < nr_irqs; i++) { + if (of_irq_parse_one(np, i, &oirq)) { + pr_err("%pOFP: failed to parse irq %d.\n", np, i); + continue; + } + + if ((oirq.args_count != 1) || + (oirq.args[0] != RV_IRQ_TIMER && + oirq.args[0] != RV_IRQ_SOFT)) { + pr_err("%pOFP: invalid irq %d (hwirq %d)\n", + np, i, oirq.args[0]); + return -ENODEV; + } + + /* Find parent irq domain and map timer irq */ + if (!clint_timer_irq && + oirq.args[0] == RV_IRQ_TIMER && + irq_find_host(oirq.np)) + clint_timer_irq = irq_of_parse_and_map(np, i); + } + + /* If CLINT timer irq not found then fail */ + if (!clint_timer_irq) { + pr_err("%pOFP: timer irq not found\n", np); + return -ENODEV; + } + + base = of_iomap(np, 0); + if (!base) { + pr_err("%pOFP: could not map registers\n", np); + return -ENODEV; + } + + clint_ipi_base = base + CLINT_IPI_OFF; + clint_timer_cmp = base + CLINT_TIMER_CMP_OFF; + clint_timer_val = base + CLINT_TIMER_VAL_OFF; + clint_timer_freq = riscv_timebase; + + pr_info("%pOFP: timer running at %ld Hz\n", np, clint_timer_freq); + + rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq); + if (rc) { + pr_err("%pOFP: clocksource register failed [%d]\n", np, rc); + goto fail_iounmap; + } + + sched_clock_register(clint_get_cycles64, 64, clint_timer_freq); + + rc = request_percpu_irq(clint_timer_irq, clint_timer_interrupt, + "clint-timer", &clint_clock_event); + if (rc) { + pr_err("registering percpu irq failed [%d]\n", rc); + goto fail_iounmap; + } + + rc = cpuhp_setup_state(CPUHP_AP_CLINT_TIMER_STARTING, + "clockevents/clint/timer:starting", + clint_timer_starting_cpu, + clint_timer_dying_cpu); + if (rc) { + pr_err("%pOFP: cpuhp setup state failed [%d]\n", np, rc); + goto fail_free_irq; + } + + riscv_set_ipi_ops(&clint_ipi_ops); + clint_clear_ipi(); + + return 0; + +fail_free_irq: + free_irq(clint_timer_irq, &clint_clock_event); +fail_iounmap: + iounmap(base); + return rc; +} + +TIMER_OF_DECLARE(clint_timer, "riscv,clint0", clint_timer_init_dt); +TIMER_OF_DECLARE(clint_timer1, "sifive,clint0", clint_timer_init_dt); diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index 9de1dabfb1268c12a875c5a2ca879ddf2a889c57..c51c5ed15aa75bb852ee4e3fbcc0554a43324628 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -19,26 +19,13 @@ #include <linux/of_irq.h> #include <asm/smp.h> #include <asm/sbi.h> - -u64 __iomem *riscv_time_cmp; -u64 __iomem *riscv_time_val; - -static inline void mmio_set_timer(u64 val) -{ - void __iomem *r; - - r = riscv_time_cmp + cpuid_to_hartid_map(smp_processor_id()); - writeq_relaxed(val, r); -} +#include <asm/timex.h> static int riscv_clock_next_event(unsigned long delta, struct clock_event_device *ce) { csr_set(CSR_IE, IE_TIE); - if (IS_ENABLED(CONFIG_RISCV_SBI)) - sbi_set_timer(get_cycles64() + delta); - else - mmio_set_timer(get_cycles64() + delta); + sbi_set_timer(get_cycles64() + delta); return 0; } diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c index f7b7743ddb947cee00e2d92b8aa576634751ab0a..b7b252c5addf1b1d5113e20a07382f67bbd2e588 100644 --- a/drivers/counter/microchip-tcb-capture.c +++ b/drivers/counter/microchip-tcb-capture.c @@ -320,8 +320,8 @@ static int mchp_tc_probe(struct platform_device *pdev) } regmap = syscon_node_to_regmap(np->parent); - if (IS_ERR(priv->regmap)) - return PTR_ERR(priv->regmap); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); /* max.
channels number is 2 when in QDEC mode */ priv->num_channels = of_property_count_u32_elems(np, "reg"); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 02ab56b2a0d8dd69856dc84ce9ed25fb2eebc2d5..47aa90f9a7c2e214de70a4917f4c0968e40a7a25 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -703,8 +703,7 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) freq = arch_freq_get_on_cpu(policy->cpu); if (freq) ret = sprintf(buf, "%u\n", freq); - else if (cpufreq_driver && cpufreq_driver->setpolicy && - cpufreq_driver->get) + else if (cpufreq_driver->setpolicy && cpufreq_driver->get) ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); else ret = sprintf(buf, "%u\n", policy->cur); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index e0220a6fbc69e65a1dd92c47ffe2c3fd91c324fb..a827b000ef51750fb8b33dc5e0bc2e9c69471eb3 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -219,14 +219,13 @@ struct global_params { * @epp_policy: Last saved policy used to set EPP/EPB * @epp_default: Power on default HWP energy performance * preference/bias - * @epp_saved: Saved EPP/EPB during system suspend or CPU offline - * operation * @epp_cached Cached HWP energy-performance preference value * @hwp_req_cached: Cached value of the last HWP Request MSR * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR * @last_io_update: Last time when IO wake flag was set * @sched_flags: Store scheduler flags for possible cross CPU update * @hwp_boost_min: Last HWP boosted min performance + * @suspended: Whether or not the driver has been suspended. * * This structure stores per CPU instance data for all CPUs. */ @@ -258,13 +257,13 @@ struct cpudata { s16 epp_powersave; s16 epp_policy; s16 epp_default; - s16 epp_saved; s16 epp_cached; u64 hwp_req_cached; u64 hwp_cap_cached; u64 last_io_update; unsigned int sched_flags; u32 hwp_boost_min; + bool suspended; }; static struct cpudata **all_cpu_data; @@ -644,6 +643,8 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) { + int ret; + /* * Use the cached HWP Request MSR value, because in the active mode the * register itself may be updated by intel_pstate_hwp_boost_up() or @@ -659,7 +660,11 @@ static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) * function, so it cannot run in parallel with the update below. */ WRITE_ONCE(cpu->hwp_req_cached, value); - return wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); + if (!ret) + cpu->epp_cached = epp; + + return ret; } static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, @@ -678,6 +683,14 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, else if (epp == -EINVAL) epp = epp_values[pref_index - 1]; + /* + * To avoid confusion, refuse to set EPP to any values different + * from 0 (performance) if the current policy is "performance", + * because those values would be overridden. 
+ */ + if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) + return -EBUSY; + ret = intel_pstate_set_epp(cpu_data, epp); } else { if (epp == -EINVAL) @@ -762,10 +775,8 @@ static ssize_t store_energy_performance_preference( cpufreq_stop_governor(policy); ret = intel_pstate_set_epp(cpu, epp); err = cpufreq_start_governor(policy); - if (!ret) { - cpu->epp_cached = epp; + if (!ret) ret = err; - } } } @@ -825,7 +836,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max, rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap); - if (global.no_turbo) + if (global.no_turbo || global.turbo_disabled) *current_max = HWP_GUARANTEED_PERF(cap); else *current_max = HWP_HIGHEST_PERF(cap); @@ -859,12 +870,6 @@ static void intel_pstate_hwp_set(unsigned int cpu) cpu_data->epp_policy = cpu_data->policy; - if (cpu_data->epp_saved >= 0) { - epp = cpu_data->epp_saved; - cpu_data->epp_saved = -EINVAL; - goto update_epp; - } - if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { epp = intel_pstate_get_epp(cpu_data, value); cpu_data->epp_powersave = epp; @@ -891,7 +896,6 @@ static void intel_pstate_hwp_set(unsigned int cpu) epp = cpu_data->epp_powersave; } -update_epp: if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { value &= ~GENMASK_ULL(31, 24); value |= (u64)epp << 24; @@ -903,14 +907,24 @@ static void intel_pstate_hwp_set(unsigned int cpu) wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); } -static void intel_pstate_hwp_force_min_perf(int cpu) +static void intel_pstate_hwp_offline(struct cpudata *cpu) { - u64 value; + u64 value = READ_ONCE(cpu->hwp_req_cached); int min_perf; - value = all_cpu_data[cpu]->hwp_req_cached; + if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { + /* + * In case the EPP has been set to "performance" by the + * active mode "performance" scaling algorithm, replace that + * temporary value with the cached EPP one. 
+ */ + value &= ~GENMASK_ULL(31, 24); + value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached); + WRITE_ONCE(cpu->hwp_req_cached, value); + } + value &= ~GENMASK_ULL(31, 0); - min_perf = HWP_LOWEST_PERF(all_cpu_data[cpu]->hwp_cap_cached); + min_perf = HWP_LOWEST_PERF(cpu->hwp_cap_cached); /* Set hwp_max = hwp_min */ value |= HWP_MAX_PERF(min_perf); @@ -920,19 +934,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu) if (boot_cpu_has(X86_FEATURE_HWP_EPP)) value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); - wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); -} - -static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) -{ - struct cpudata *cpu_data = all_cpu_data[policy->cpu]; - - if (!hwp_active) - return 0; - - cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0); - - return 0; + wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); } #define POWER_CTL_EE_ENABLE 1 @@ -959,8 +961,28 @@ static void set_power_ctl_ee_state(bool input) static void intel_pstate_hwp_enable(struct cpudata *cpudata); +static void intel_pstate_hwp_reenable(struct cpudata *cpu) +{ + intel_pstate_hwp_enable(cpu); + wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached)); +} + +static int intel_pstate_suspend(struct cpufreq_policy *policy) +{ + struct cpudata *cpu = all_cpu_data[policy->cpu]; + + pr_debug("CPU %d suspending\n", cpu->cpu); + + cpu->suspended = true; + + return 0; +} + static int intel_pstate_resume(struct cpufreq_policy *policy) { + struct cpudata *cpu = all_cpu_data[policy->cpu]; + + pr_debug("CPU %d resuming\n", cpu->cpu); /* Only restore if the system default is changed */ if (power_ctl_ee_state == POWER_CTL_EE_ENABLE) @@ -968,18 +990,16 @@ static int intel_pstate_resume(struct cpufreq_policy *policy) else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE) set_power_ctl_ee_state(false); - if (!hwp_active) - return 0; + if (cpu->suspended && hwp_active) { + mutex_lock(&intel_pstate_limits_lock); - mutex_lock(&intel_pstate_limits_lock); + /* Re-enable HWP, because "online" has not done that. 
*/ + intel_pstate_hwp_reenable(cpu); - if (policy->cpu == 0) - intel_pstate_hwp_enable(all_cpu_data[policy->cpu]); + mutex_unlock(&intel_pstate_limits_lock); + } - all_cpu_data[policy->cpu]->epp_policy = 0; - intel_pstate_hwp_set(policy->cpu); - - mutex_unlock(&intel_pstate_limits_lock); + cpu->suspended = false; return 0; } @@ -1428,7 +1448,6 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); - cpudata->epp_policy = 0; if (cpudata->epp_default == -EINVAL) cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); } @@ -2097,25 +2116,31 @@ static int intel_pstate_init_cpu(unsigned int cpunum) all_cpu_data[cpunum] = cpu; - cpu->epp_default = -EINVAL; - cpu->epp_powersave = -EINVAL; - cpu->epp_saved = -EINVAL; - } - - cpu = all_cpu_data[cpunum]; + cpu->cpu = cpunum; - cpu->cpu = cpunum; + cpu->epp_default = -EINVAL; - if (hwp_active) { - const struct x86_cpu_id *id; + if (hwp_active) { + const struct x86_cpu_id *id; - intel_pstate_hwp_enable(cpu); + intel_pstate_hwp_enable(cpu); - id = x86_match_cpu(intel_pstate_hwp_boost_ids); - if (id && intel_pstate_acpi_pm_profile_server()) - hwp_boost = true; + id = x86_match_cpu(intel_pstate_hwp_boost_ids); + if (id && intel_pstate_acpi_pm_profile_server()) + hwp_boost = true; + } + } else if (hwp_active) { + /* + * Re-enable HWP in case this happens after a resume from ACPI + * S3 if the CPU was offline during the whole system/resume + * cycle. + */ + intel_pstate_hwp_reenable(cpu); } + cpu->epp_powersave = -EINVAL; + cpu->epp_policy = 0; + intel_pstate_get_cpu_pstates(cpu); pr_debug("controlling: cpu %d\n", cpunum); @@ -2296,28 +2321,61 @@ static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) return 0; } -static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy) +static int intel_pstate_cpu_offline(struct cpufreq_policy *policy) { + struct cpudata *cpu = all_cpu_data[policy->cpu]; + + pr_debug("CPU %d going offline\n", cpu->cpu); + + if (cpu->suspended) + return 0; + + /* + * If the CPU is an SMT thread and it goes offline with the performance + * settings different from the minimum, it will prevent its sibling + * from getting to lower performance levels, so force the minimum + * performance on CPU offline to prevent that from happening. + */ if (hwp_active) - intel_pstate_hwp_force_min_perf(policy->cpu); + intel_pstate_hwp_offline(cpu); else - intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]); + intel_pstate_set_min_pstate(cpu); + + intel_pstate_exit_perf_limits(policy); + + return 0; +} + +static int intel_pstate_cpu_online(struct cpufreq_policy *policy) +{ + struct cpudata *cpu = all_cpu_data[policy->cpu]; + + pr_debug("CPU %d going online\n", cpu->cpu); + + intel_pstate_init_acpi_perf_limits(policy); + + if (hwp_active) { + /* + * Re-enable HWP and clear the "suspended" flag to let "resume" + * know that it need not do that. 
+ */ + intel_pstate_hwp_reenable(cpu); + cpu->suspended = false; + } + + return 0; } static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) { - pr_debug("CPU %d exiting\n", policy->cpu); + pr_debug("CPU %d stopping\n", policy->cpu); intel_pstate_clear_update_util_hook(policy->cpu); - if (hwp_active) - intel_pstate_hwp_save_state(policy); - - intel_cpufreq_stop_cpu(policy); } static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) { - intel_pstate_exit_perf_limits(policy); + pr_debug("CPU %d exiting\n", policy->cpu); policy->fast_switch_possible = false; @@ -2378,6 +2436,12 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) */ policy->policy = CPUFREQ_POLICY_POWERSAVE; + if (hwp_active) { + struct cpudata *cpu = all_cpu_data[policy->cpu]; + + cpu->epp_cached = intel_pstate_get_epp(cpu, 0); + } + return 0; } @@ -2385,11 +2449,13 @@ static struct cpufreq_driver intel_pstate = { .flags = CPUFREQ_CONST_LOOPS, .verify = intel_pstate_verify_policy, .setpolicy = intel_pstate_set_policy, - .suspend = intel_pstate_hwp_save_state, + .suspend = intel_pstate_suspend, .resume = intel_pstate_resume, .init = intel_pstate_cpu_init, .exit = intel_pstate_cpu_exit, .stop_cpu = intel_pstate_stop_cpu, + .offline = intel_pstate_cpu_offline, + .online = intel_pstate_cpu_online, .update_limits = intel_pstate_update_limits, .name = "intel_pstate", }; @@ -2585,7 +2651,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); WRITE_ONCE(cpu->hwp_req_cached, value); - cpu->epp_cached = (value & GENMASK_ULL(31, 24)) >> 24; + cpu->epp_cached = intel_pstate_get_epp(cpu, value); } else { turbo_max = cpu->pstate.turbo_pstate; policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; @@ -2644,7 +2710,10 @@ static struct cpufreq_driver intel_cpufreq = { .fast_switch = intel_cpufreq_fast_switch, .init = intel_cpufreq_cpu_init, .exit = intel_cpufreq_cpu_exit, - .stop_cpu = intel_cpufreq_stop_cpu, + .offline = intel_pstate_cpu_offline, + .online = intel_pstate_cpu_online, + .suspend = intel_pstate_suspend, + .resume = intel_pstate_resume, .update_limits = intel_pstate_update_limits, .name = "intel_cpufreq", }; @@ -2667,9 +2736,6 @@ static void intel_pstate_driver_cleanup(void) } put_online_cpus(); - if (intel_pstate_driver == &intel_pstate) - intel_pstate_sysfs_hide_hwp_dynamic_boost(); - intel_pstate_driver = NULL; } @@ -2695,14 +2761,6 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) return 0; } -static int intel_pstate_unregister_driver(void) -{ - cpufreq_unregister_driver(intel_pstate_driver); - intel_pstate_driver_cleanup(); - - return 0; -} - static ssize_t intel_pstate_show_status(char *buf) { if (!intel_pstate_driver) @@ -2714,20 +2772,23 @@ static ssize_t intel_pstate_show_status(char *buf) static int intel_pstate_update_status(const char *buf, size_t size) { - int ret; + if (size == 3 && !strncmp(buf, "off", size)) { + if (!intel_pstate_driver) + return -EINVAL; + + if (hwp_active) + return -EBUSY; - if (size == 3 && !strncmp(buf, "off", size)) - return intel_pstate_driver ? 
- intel_pstate_unregister_driver() : -EINVAL; + cpufreq_unregister_driver(intel_pstate_driver); + intel_pstate_driver_cleanup(); + } if (size == 6 && !strncmp(buf, "active", size)) { if (intel_pstate_driver) { if (intel_pstate_driver == &intel_pstate) return 0; - ret = intel_pstate_unregister_driver(); - if (ret) - return ret; + cpufreq_unregister_driver(intel_pstate_driver); } return intel_pstate_register_driver(&intel_pstate); @@ -2738,9 +2799,8 @@ static int intel_pstate_update_status(const char *buf, size_t size) if (intel_pstate_driver == &intel_cpufreq) return 0; - ret = intel_pstate_unregister_driver(); - if (ret) - return ret; + cpufreq_unregister_driver(intel_pstate_driver); + intel_pstate_sysfs_hide_hwp_dynamic_boost(); } return intel_pstate_register_driver(&intel_cpufreq); diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index bb61677c11c7142c1d5b41d8a1855846fb1b007d..ef0a3216a3862393b5d41128f0766af9d1515380 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -129,7 +129,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE); case 0x0D: /* Pentium M (Dothan) */ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; - /* fall through */ + fallthrough; case 0x09: /* Pentium M (Banias) */ return speedstep_get_frequency(SPEEDSTEP_CPU_PM); } diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index 5c4f8f07c5a62a9682a48a162b858b34d2c412a4..a13a2d1e444ed6e62ff357927f1912b6d0408131 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c @@ -366,7 +366,7 @@ enum speedstep_processor speedstep_detect_processor(void) } else return SPEEDSTEP_CPU_PIII_C; } - /* fall through */ + fallthrough; default: return 0; } diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index bae527e507e017821f1f5b424186773375f1339c..e1d931c457a732de79a7af61fa55e9a31dff7088 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -56,9 +56,11 @@ struct read_counters_work { static struct workqueue_struct *read_counters_wq; -static enum cluster get_cpu_cluster(u8 cpu) +static void get_cpu_cluster(void *cluster) { - return MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1); + u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; + + *((uint32_t *)cluster) = MPIDR_AFFINITY_LEVEL(mpidr, 1); } /* @@ -186,8 +188,10 @@ static unsigned int tegra194_get_speed(u32 cpu) static int tegra194_cpufreq_init(struct cpufreq_policy *policy) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); - int cl = get_cpu_cluster(policy->cpu); u32 cpu; + u32 cl; + + smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true); if (cl >= data->num_clusters) return -EINVAL; diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index ab0de27539ad4e2e855efd88a609b689343276ed..8f9fdd864391a697daa33c74a8af118d1c0de4e0 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -86,11 +86,11 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data, case DRA76_EFUSE_HAS_PLUS_MPU_OPP: case DRA76_EFUSE_HAS_ALL_MPU_OPP: calculated_efuse |= DRA76_EFUSE_PLUS_MPU_OPP; - /* Fall through */ + fallthrough; case DRA7_EFUSE_HAS_ALL_MPU_OPP: case DRA7_EFUSE_HAS_HIGH_MPU_OPP: calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP; - /* Fall through */ + fallthrough; case DRA7_EFUSE_HAS_OD_MPU_OPP: calculated_efuse |= DRA7_EFUSE_OD_MPU_OPP; } diff --git 
a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 87197319ab0694663033789ffdabe11f79ce23a5..04becd70cc41f5a147af992f2a6ff0f7a9cfe96f 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/suspend.h> #include <linux/tick.h> +#include <linux/mmu_context.h> #include <trace/events/power.h> #include "cpuidle.h" @@ -145,21 +146,24 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv, * executing it contains RCU usage regarded as invalid in the idle * context, so tell RCU about that. */ - RCU_NONIDLE(tick_freeze()); + tick_freeze(); /* * The state used here cannot be a "coupled" one, because the "coupled" * cpuidle mechanism enables interrupts and doing that with timekeeping * suspended is generally unsafe. */ stop_critical_timings(); + rcu_idle_enter(); drv->states[index].enter_s2idle(dev, drv, index); - WARN_ON(!irqs_disabled()); + if (WARN_ON_ONCE(!irqs_disabled())) + local_irq_disable(); /* * timekeeping_resume() that will be called by tick_unfreeze() for the * first CPU executing it calls functions containing RCU read-side * critical sections, so tell RCU about that. */ - RCU_NONIDLE(tick_unfreeze()); + rcu_idle_exit(); + tick_unfreeze(); start_critical_timings(); time_end = ns_to_ktime(local_clock()); @@ -225,19 +229,24 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, broadcast = false; } + if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED) + leave_mm(dev->cpu); + /* Take note of the planned idle state. */ sched_idle_set_state(target_state); - trace_cpu_idle_rcuidle(index, dev->cpu); + trace_cpu_idle(index, dev->cpu); time_start = ns_to_ktime(local_clock()); stop_critical_timings(); + rcu_idle_enter(); entered_state = target_state->enter(dev, drv, index); + rcu_idle_exit(); start_critical_timings(); sched_clock_idle_wakeup_event(); time_end = ns_to_ktime(local_clock()); - trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); + trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); /* The cpu is no longer idle or about to enter idle.
*/ sched_idle_set_state(NULL); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index aa3a4ed07a66696587d9ed6682d4bd9dba956af0..52a9b7cf6576f97b23e709029deac470ee54748f 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -873,6 +873,9 @@ config CRYPTO_DEV_SA2UL select CRYPTO_AES select CRYPTO_AES_ARM64 select CRYPTO_ALGAPI + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 select HW_RANDOM select SG_SPLIT help diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 1a46eeddf0824249128506fd90e8719054973ac4..809c3033ca74897e93a06ce1808f91652ee76209 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2310,7 +2310,7 @@ static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req) case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START: ret = 0; - /* Fallthrough */ + fallthrough; default: artpec6_crypto_common_destroy(&req_ctx->common); diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index dc5fda522719834c57722d2b848c87874bcf88a6..4fe7898c85615a7d8d9dabddc0e6a933368e8463 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -90,11 +90,11 @@ static int setup_sgio_components(struct cpt_vf *cptvf, struct buf_ptr *list, case 3: sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size); sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr); - /* Fall through */ + fallthrough; case 2: sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size); sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr); - /* Fall through */ + fallthrough; case 1: sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size); sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr); diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c index 91dee616d15e71e39f7f17f566eb9f47cb993392..c5cce024886ace0eea398ebb0fa2ac8f2779e60e 100644 --- a/drivers/crypto/chelsio/chcr_ktls.c +++ b/drivers/crypto/chelsio/chcr_ktls.c @@ -135,7 +135,7 @@ static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info, break; /* update to the next state and also initialize TCB */ tx_info->connection_state = new_state; - /* FALLTHRU */ + fallthrough; case KTLS_CONN_ACT_OPEN_RPL: /* if we are stuck in this state, it means the tcb init might not * have been received by HW, try sending it again. @@ -150,7 +150,7 @@ static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info, break; /* update to the next state and check if l2t_state is valid */ tx_info->connection_state = new_state; - /* FALLTHRU */ + fallthrough; case KTLS_CONN_SET_TCB_RPL: /* Check if l2t state is valid, then move to ready state.
*/ if (cxgb4_check_l2t_valid(tx_info->l2te)) { diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c index cbc3d7869ebe49772a85272167ff7c4a6be6ac71..c80baf1ad90b2f0d3ec4cfd4a7cf3a94b1fe5d5c 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c @@ -140,11 +140,11 @@ static inline int setup_sgio_components(struct pci_dev *pdev, case 3: sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size); sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr); - /* Fall through */ + fallthrough; case 2: sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size); sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr); - /* Fall through */ + fallthrough; case 1: sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size); sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr); diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index 1c8ca151a9635af4c95dc73d74d5adf514462fdc..ec9b390276d6fb10df3c66d045a229d73f8612e8 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -131,9 +131,10 @@ static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); ADF_CSR_WR(mailbox, mb_offset, 1); - ret = readl_poll_timeout(mailbox + mb_offset, status, - status == 0, ADF_ADMIN_POLL_DELAY_US, - ADF_ADMIN_POLL_TIMEOUT_US); + ret = read_poll_timeout(ADF_CSR_RD, status, status == 0, + ADF_ADMIN_POLL_DELAY_US, + ADF_ADMIN_POLL_TIMEOUT_US, true, + mailbox, mb_offset); if (ret < 0) { /* Response timeout */ dev_err(&GET_DEV(accel_dev), diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index 519fd5acf713bb022cc1ad0e479e1b10457b016d..8b090b7ae8c6b6b8cdef2eab47253f444396845d 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -340,7 +340,7 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev) /* VF is newer than PF and decides whether it is compatible */ if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) break; - /* fall through */ + fallthrough; case ADF_PF2VF_VF_INCOMPATIBLE: dev_err(&GET_DEV(accel_dev), "PF (vers %d) and VF (vers %d) are not compatible\n", diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index bff759e2f8112965661707f336777cea4a5f1fc5..00c615f9f9a839a0b9782956755e44a0c6ec124d 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -752,7 +752,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, case ICP_GPA_ABS: case ICP_GPB_ABS: ctx_mask = 0; - /* fall through */ + fallthrough; case ICP_GPA_REL: case ICP_GPB_REL: return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type, @@ -762,7 +762,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, case ICP_SR_RD_ABS: case ICP_DR_RD_ABS: ctx_mask = 0; - /* fall through */ + fallthrough; case ICP_SR_REL: case ICP_DR_REL: case ICP_SR_RD_REL: @@ -772,7 +772,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, case ICP_SR_WR_ABS: case ICP_DR_WR_ABS: ctx_mask = 0; - /* fall through */ + fallthrough; case ICP_SR_WR_REL: case ICP_DR_WR_REL: return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type, diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index 
f22f6fa612b379efb674383e8470c3e526d9f5e9..9866c2a5e9a70133ce110fdc8187e8a25f279715 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -314,17 +314,17 @@ void cryp_save_device_context(struct cryp_device_data *device_data, case CRYP_KEY_SIZE_256: ctx->key_4_l = readl_relaxed(&src_reg->key_4_l); ctx->key_4_r = readl_relaxed(&src_reg->key_4_r); - /* Fall through */ + fallthrough; case CRYP_KEY_SIZE_192: ctx->key_3_l = readl_relaxed(&src_reg->key_3_l); ctx->key_3_r = readl_relaxed(&src_reg->key_3_r); - /* Fall through */ + fallthrough; case CRYP_KEY_SIZE_128: ctx->key_2_l = readl_relaxed(&src_reg->key_2_l); ctx->key_2_r = readl_relaxed(&src_reg->key_2_r); - /* Fall through */ + fallthrough; default: ctx->key_1_l = readl_relaxed(&src_reg->key_1_l); @@ -364,17 +364,17 @@ void cryp_restore_device_context(struct cryp_device_data *device_data, case CRYP_KEY_SIZE_256: writel_relaxed(ctx->key_4_l, &reg->key_4_l); writel_relaxed(ctx->key_4_r, &reg->key_4_r); - /* Fall through */ + fallthrough; case CRYP_KEY_SIZE_192: writel_relaxed(ctx->key_3_l, &reg->key_3_l); writel_relaxed(ctx->key_3_r, &reg->key_3_r); - /* Fall through */ + fallthrough; case CRYP_KEY_SIZE_128: writel_relaxed(ctx->key_2_l, &reg->key_2_l); writel_relaxed(ctx->key_2_r, &reg->key_2_r); - /* Fall through */ + fallthrough; default: writel_relaxed(ctx->key_1_l, &reg->key_1_l); diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 4c0af2eb7e19648aecf4489eebd08a60d7d2205d..1e89513f3c5962928f65200548a0a8d59ff0e81e 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -429,7 +429,7 @@ int dev_dax_probe(struct device *dev) return -EBUSY; } - dev_dax->pgmap.type = MEMORY_DEVICE_DEVDAX; + dev_dax->pgmap.type = MEMORY_DEVICE_GENERIC; addr = devm_memremap_pages(dev, &dev_dax->pgmap); if (IS_ERR(addr)) return PTR_ERR(addr); diff --git a/drivers/dax/super.c b/drivers/dax/super.c index c82cbcb6420289f8cf529394ef1138158d32fb4c..e5767c83ea23d8a71aad9f25811c51d42e728583 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -100,6 +100,12 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev, return false; } + if (!dax_dev || !bdev_dax_supported(bdev, blocksize)) { + pr_debug("%s: error: dax unsupported by block device\n", + bdevname(bdev, buf)); + return false; + } + id = dax_read_lock(); len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn); diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 1699a8e309ef0d41d4ce7b8ab54c704b19ad2111..58564d82a3a22100eacc3f354ac10c6e72b6fcb2 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -316,9 +316,9 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) * name of the dma-buf if the same piece of memory is used for multiple * purpose between different devices. * - * @dmabuf [in] dmabuf buffer that will be renamed. - * @buf: [in] A piece of userspace memory that contains the name of - * the dma-buf. + * @dmabuf: [in] dmabuf buffer that will be renamed. + * @buf: [in] A piece of userspace memory that contains the name of + * the dma-buf. * * Returns 0 on success. If the dma-buf buffer is already attached to * devices, return -EBUSY.
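
The "/* fall through */" to "fallthrough;" conversions in the hunks above replace free-form comments with the pseudo-keyword from include/linux/compiler_attributes.h, so compilers building with -Wimplicit-fallthrough can tell deliberate fall-through from a forgotten break. Below is a minimal standalone sketch of the cascade pattern, modeled on the ux500 cryp key-size switch; the helper name and pair counts are illustrative, not the driver's actual code.

/*
 * Outside the kernel tree, fallthrough can be approximated like this
 * for GCC >= 7 / Clang >= 10; in-tree it comes from
 * include/linux/compiler_attributes.h.
 */
#ifndef fallthrough
#define fallthrough __attribute__((__fallthrough__))
#endif

/* Hypothetical helper: how many register pairs a given key size saves. */
static unsigned int key_pairs_saved(unsigned int key_bits)
{
	unsigned int pairs = 1;		/* key_1 is always saved */

	switch (key_bits) {
	case 256:
		pairs++;		/* key_4 */
		fallthrough;
	case 192:
		pairs++;		/* key_3 */
		fallthrough;
	case 128:
		pairs++;		/* key_2 */
		fallthrough;
	default:
		break;
	}

	return pairs;
}

Unlike the comment form, a fallthrough; statement that is not immediately followed by a case or default label is diagnosed by the compiler, which is what makes the conversion more than cosmetic.
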
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c index 3d123502ff12a8fb7845c9bf3d4193768f3836da..7d129e68ac70116b1c7169e4176a981d292bd2f4 100644 --- a/drivers/dma-buf/dma-fence-chain.c +++ b/drivers/dma-buf/dma-fence-chain.c @@ -222,6 +222,7 @@ EXPORT_SYMBOL(dma_fence_chain_ops); * @chain: the chain node to initialize * @prev: the previous fence * @fence: the current fence + * @seqno: the sequence number to use for the fence chain * * Initialize a new chain node and either start a new chain or add the node to * the existing chain of the previous fence. diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index 35f4804ea4afaa6ac90146b827c99edab7568783..235f1396f968606ddfee61110490de4e16463df4 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -135,11 +135,13 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) if (ret < 0) { dev_warn(&adev->dev, "error in parsing resource group\n"); - return; + break; } grp = (struct acpi_csrt_group *)((void *)grp + grp->length); } + + acpi_put_table((struct acpi_table_header *)csrt); } /** diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 9adc7a2fa3d3b597a864b63c3d14c38f3cb7c220..a24882ba37643db58ba982a53e1da00d6873ffbb 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1767,7 +1767,7 @@ static u32 pl08x_memcpy_cctl(struct pl08x_driver_data *pl08x) default: dev_err(&pl08x->adev->dev, "illegal burst size for memcpy, set to 1\n"); - /* Fall through */ + fallthrough; case PL08X_BURST_SZ_1: cctl |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT | PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT; @@ -1806,7 +1806,7 @@ static u32 pl08x_memcpy_cctl(struct pl08x_driver_data *pl08x) default: dev_err(&pl08x->adev->dev, "illegal bus width for memcpy, set to 8 bits\n"); - /* Fall through */ + fallthrough; case PL08X_BUS_WIDTH_8_BITS: cctl |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT | PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; @@ -1850,7 +1850,7 @@ static u32 pl08x_ftdmac020_memcpy_cctl(struct pl08x_driver_data *pl08x) default: dev_err(&pl08x->adev->dev, "illegal bus width for memcpy, set to 8 bits\n"); - /* Fall through */ + fallthrough; case PL08X_BUS_WIDTH_8_BITS: cctl |= PL080_WIDTH_8BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT | PL080_WIDTH_8BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT; @@ -2612,7 +2612,7 @@ static int pl08x_of_probe(struct amba_device *adev, switch (val) { default: dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n"); - /* Fall through */ + fallthrough; case 1: pd->memcpy_burst_size = PL08X_BURST_SZ_1; break; @@ -2647,7 +2647,7 @@ static int pl08x_of_probe(struct amba_device *adev, switch (val) { default: dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n"); - /* Fall through */ + fallthrough; case 8: pd->memcpy_bus_width = PL08X_BUS_WIDTH_8_BITS; break; diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 45bbcd6146fd23643ed0022d0d553cd68c8e8138..a2cf25c6e3b35603d14a1c7545e137c2a3a3072f 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1650,13 +1650,17 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, return NULL; dmac_pdev = of_find_device_by_node(dma_spec->np); + if (!dmac_pdev) + return NULL; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); atslave = kmalloc(sizeof(*atslave), GFP_KERNEL); - if (!atslave) + if (!atslave) { + put_device(&dmac_pdev->dev); return NULL; + } atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW; /* @@ -1685,8 
+1689,11 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, atslave->dma_dev = &dmac_pdev->dev; chan = dma_request_channel(mask, at_dma_filter, atslave); - if (!chan) + if (!chan) { + put_device(&dmac_pdev->dev); + kfree(atslave); return NULL; + } atchan = to_at_dma_chan(chan); atchan->per_if = dma_spec->args[0] & 0xff; diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 448f663da89c69e7bd0b4d9bfa135174a28e0bc0..8beed91428bd61bacc8467f16cfb5892d39649ba 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -879,24 +879,11 @@ static int jz4780_dma_probe(struct platform_device *pdev) return -EINVAL; } - ret = platform_get_irq(pdev, 0); - if (ret < 0) - return ret; - - jzdma->irq = ret; - - ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev), - jzdma); - if (ret) { - dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq); - return ret; - } - jzdma->clk = devm_clk_get(dev, NULL); if (IS_ERR(jzdma->clk)) { dev_err(dev, "failed to get clock\n"); ret = PTR_ERR(jzdma->clk); - goto err_free_irq; + return ret; } clk_prepare_enable(jzdma->clk); @@ -949,10 +936,23 @@ static int jz4780_dma_probe(struct platform_device *pdev) jzchan->vchan.desc_free = jz4780_dma_desc_free; } + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto err_disable_clk; + + jzdma->irq = ret; + + ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev), + jzdma); + if (ret) { + dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq); + goto err_disable_clk; + } + ret = dmaenginem_async_device_register(dd); if (ret) { dev_err(dev, "failed to register device\n"); - goto err_disable_clk; + goto err_free_irq; } /* Register with OF DMA helpers. */ @@ -960,17 +960,17 @@ static int jz4780_dma_probe(struct platform_device *pdev) jzdma); if (ret) { dev_err(dev, "failed to register OF DMA controller\n"); - goto err_disable_clk; + goto err_free_irq; } dev_info(dev, "JZ4780 DMA controller initialised\n"); return 0; -err_disable_clk: - clk_disable_unprepare(jzdma->clk); - err_free_irq: free_irq(jzdma->irq, jzdma); + +err_disable_clk: + clk_disable_unprepare(jzdma->clk); return ret; } diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index ed430ad9b3dd8e1e4f096d11c9bfe1cfe6f91726..b971505b87152398cd76a16a8fb3a05fb9767c89 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -405,7 +405,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) if (xfer->cyclic) { burst->dar = xfer->xfer.cyclic.paddr; } else { - burst->dar = sg_dma_address(sg); + burst->dar = dst_addr; /* Unlike the typical assumption by other * drivers/IPs the peripheral memory isn't * a FIFO memory, in this case, it's a @@ -413,14 +413,13 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) * and destination addresses are increased * by the same portion (data length) */ - src_addr += sg_dma_len(sg); } } else { burst->dar = dst_addr; if (xfer->cyclic) { burst->sar = xfer->xfer.cyclic.paddr; } else { - burst->sar = sg_dma_address(sg); + burst->sar = src_addr; /* Unlike the typical assumption by other * drivers/IPs the peripheral memory isn't * a FIFO memory, in this case, it's a @@ -428,12 +427,14 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer) * and destination addresses are increased * by the same portion (data length) */ - dst_addr += sg_dma_len(sg); } } - if (!xfer->cyclic) + if (!xfer->cyclic) { + src_addr += sg_dma_len(sg); + dst_addr += sg_dma_len(sg); sg = sg_next(sg); + } } return 
vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags); diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index ad72b3f42ffa06d413b7d2f5a73a5faa00458a71..e342cf52d2966b4dfc62f69a9473826ece5cf1f0 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1163,7 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, switch (chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; - /* Fall through */ + fallthrough; case FSL_DMA_IP_83XX: chan->toggle_ext_start = fsl_chan_toggle_ext_start; chan->set_src_loop_size = fsl_chan_set_src_loop_size; diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 56f18ae99233282e52ba9fc3865ff0945b3bd921..308bed0a560acdf6016f3438f3c9e4e26c5ddebb 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -205,10 +205,10 @@ struct fsldma_chan { #else static u64 fsl_ioread64(const u64 __iomem *addr) { - u32 fsl_addr = lower_32_bits(addr); - u64 fsl_addr_hi = (u64)in_le32((u32 *)(fsl_addr + 1)) << 32; + u32 val_lo = in_le32((u32 __iomem *)addr); + u32 val_hi = in_le32((u32 __iomem *)addr + 1); - return fsl_addr_hi | in_le32((u32 *)fsl_addr); + return ((u64)val_hi << 32) + val_lo; } static void fsl_iowrite64(u64 val, u64 __iomem *addr) @@ -219,10 +219,10 @@ static void fsl_iowrite64(u64 val, u64 __iomem *addr) static u64 fsl_ioread64be(const u64 __iomem *addr) { - u32 fsl_addr = lower_32_bits(addr); - u64 fsl_addr_hi = (u64)in_be32((u32 *)fsl_addr) << 32; + u32 val_hi = in_be32((u32 __iomem *)addr); + u32 val_lo = in_be32((u32 __iomem *)addr + 1); - return fsl_addr_hi | in_be32((u32 *)(fsl_addr + 1)); + return ((u64)val_hi << 32) + val_lo; } static void fsl_iowrite64be(u64 val, u64 __iomem *addr) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 14b45853aa5fcee31900c51321e5d8b534c54355..b75d699160bfad03d1ceed9702be81ec295e0b1d 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -410,10 +410,27 @@ int idxd_device_enable(struct idxd_device *idxd) return 0; } +void idxd_device_wqs_clear_state(struct idxd_device *idxd) +{ + int i; + + lockdep_assert_held(&idxd->dev_lock); + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + if (wq->state == IDXD_WQ_ENABLED) { + idxd_wq_disable_cleanup(wq); + wq->state = IDXD_WQ_DISABLED; + } + } +} + int idxd_device_disable(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; u32 status; + unsigned long flags; if (!idxd_is_enabled(idxd)) { dev_dbg(dev, "Device is not enabled\n"); @@ -429,13 +446,22 @@ int idxd_device_disable(struct idxd_device *idxd) return -ENXIO; } + spin_lock_irqsave(&idxd->dev_lock, flags); + idxd_device_wqs_clear_state(idxd); idxd->state = IDXD_DEV_CONF_READY; + spin_unlock_irqrestore(&idxd->dev_lock, flags); return 0; } void idxd_device_reset(struct idxd_device *idxd) { + unsigned long flags; + idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL); + spin_lock_irqsave(&idxd->dev_lock, flags); + idxd_device_wqs_clear_state(idxd); + idxd->state = IDXD_DEV_CONF_READY; + spin_unlock_irqrestore(&idxd->dev_lock, flags); } /* Device configuration bits */ diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index b5142556cc4e8648867d0d8162e1f835916801d3..1e9e6991f543d76325e46ccf0d1f6ce83c644de5 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -11,18 +11,6 @@ #include "idxd.h" #include "registers.h" -void idxd_device_wqs_clear_state(struct idxd_device *idxd) -{ - int i; - - lockdep_assert_held(&idxd->dev_lock); - for (i = 0; 
i < idxd->max_wqs; i++) { - struct idxd_wq *wq = &idxd->wqs[i]; - - wq->state = IDXD_WQ_DISABLED; - } -} - static void idxd_device_reinit(struct work_struct *work) { struct idxd_device *idxd = container_of(work, struct idxd_device, work); diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 5c0fb3134825f56ce02af12738eabcbdbfcbbacf..88717506c1f6b0453edaba53e526641ec4384170 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -556,7 +556,7 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) * We fall-through here intentionally, since a 2D transfer is * similar to MEMCPY just adding the 2D slot configuration. */ - /* Fall through */ + fallthrough; case IMXDMA_DESC_MEMCPY: imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel)); imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel)); diff --git a/drivers/dma/iop-adma.h b/drivers/dma/iop-adma.h index c499c9578f0097d4f00c3ef215a7f3e80fe0f555..d44eabb6f5ebd6800bf70eba800c4b0f0d7bad47 100644 --- a/drivers/dma/iop-adma.h +++ b/drivers/dma/iop-adma.h @@ -496,7 +496,7 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, } hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr; src_cnt = 24; - /* fall through */ + fallthrough; case 17 ... 24: if (!u_desc_ctrl.field.blk_ctrl) { hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0; @@ -510,7 +510,7 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, } hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr; src_cnt = 16; - /* fall through */ + fallthrough; case 9 ... 16: if (!u_desc_ctrl.field.blk_ctrl) u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */ @@ -522,7 +522,7 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, } hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr; src_cnt = 8; - /* fall through */ + fallthrough; case 2 ... 8: shift = 1; for (i = 0; i < src_cnt; i++) { @@ -602,19 +602,19 @@ iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, case 25 ... 32: u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */ hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0; - /* fall through */ + fallthrough; case 17 ... 24: if (!u_desc_ctrl.field.blk_ctrl) { hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0; u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */ } hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0; - /* fall through */ + fallthrough; case 9 ... 16: if (!u_desc_ctrl.field.blk_ctrl) u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */ hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0; - /* fall through */ + fallthrough; case 1 ... 
8: if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4) u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */ diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 74df621402e106ef276e2756615dd9b90810b17c..ca4e0930207a03a88a9e65756425495239b8ddd1 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -483,7 +483,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf, default: pr_warn("%s(): invalid bus width %u\n", __func__, width); - /* fall through */ + fallthrough; case DMA_SLAVE_BUSWIDTH_1_BYTE: size = burst; } diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 863f2aaf5c8f7fa037f5303c2cf1c4af4bb5f5d4..8a4f608904b984c65bf4211aaffb4fcd9fc37b4b 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -71,12 +71,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, return NULL; chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target); - if (chan) { - chan->router = ofdma->dma_router; - chan->route_data = route_data; - } else { + if (IS_ERR_OR_NULL(chan)) { ofdma->dma_router->route_free(ofdma->dma_router->dev, route_data); + } else { + chan->router = ofdma->dma_router; + chan->route_data = route_data; } /* diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 2c508ee672b9084b7f6d333ea08732b8bc7da45d..5274a0704d96085ba6c24e4288f27dbec440c623 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1061,16 +1061,16 @@ static bool _start(struct pl330_thread *thrd) if (_state(thrd) == PL330_STATE_KILLING) UNTIL(thrd, PL330_STATE_STOPPED) - /* fall through */ + fallthrough; case PL330_STATE_FAULTING: _stop(thrd); - /* fall through */ + fallthrough; case PL330_STATE_KILLING: case PL330_STATE_COMPLETING: UNTIL(thrd, PL330_STATE_STOPPED) - /* fall through */ + fallthrough; case PL330_STATE_STOPPED: return _trigger(thrd); @@ -1121,7 +1121,6 @@ static u32 _emit_load(unsigned int dry_run, u8 buf[], switch (direction) { case DMA_MEM_TO_MEM: - /* fall through */ case DMA_MEM_TO_DEV: off += _emit_LD(dry_run, &buf[off], cond); break; @@ -1155,7 +1154,6 @@ static inline u32 _emit_store(unsigned int dry_run, u8 buf[], switch (direction) { case DMA_MEM_TO_MEM: - /* fall through */ case DMA_DEV_TO_MEM: off += _emit_ST(dry_run, &buf[off], cond); break; @@ -1216,7 +1214,6 @@ static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[], switch (pxs->desc->rqtype) { case DMA_MEM_TO_DEV: - /* fall through */ case DMA_DEV_TO_MEM: off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc, cond); @@ -1266,7 +1263,6 @@ static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[], switch (pxs->desc->rqtype) { case DMA_MEM_TO_DEV: - /* fall through */ case DMA_DEV_TO_MEM: off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr); off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, 1, @@ -2801,6 +2797,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, while (burst != (1 << desc->rqcfg.brst_size)) desc->rqcfg.brst_size++; + desc->rqcfg.brst_len = get_burst_len(desc, len); /* * If burst size is smaller than bus width then make sure we only * transfer one at a time to avoid a burst stradling an MFIFO entry. 
@@ -2808,7 +2805,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width) desc->rqcfg.brst_len = 1; - desc->rqcfg.brst_len = get_burst_len(desc, len); desc->bytes_requested = len; desc->txd.flags = flags; diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 2deeaab078a46f0119dd19c9adce9d2642e8d274..788d696323bbed133e935093585d9e70743e92fb 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -383,7 +383,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) switch (desc->mark) { case DESC_COMPLETED: desc->mark = DESC_WAITING; - /* Fall through */ + fallthrough; case DESC_WAITING: if (head_acked) async_tx_ack(&desc->async_tx); diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index c14e6cb105cd80ed1464313d692cb7545bb3008e..d86dba0fd8e6b6cc2da03fe69d524308dbc9b446 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -2059,9 +2059,9 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, return NULL; } - cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false, - CPPI5_TR_EVENT_SIZE_COMPLETION, 0); - cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT); + cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false, + false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT); tr_req[tr_idx].addr = sg_addr; tr_req[tr_idx].icnt0 = tr0_cnt0; @@ -3101,14 +3101,14 @@ static struct udma_match_data am654_main_data = { .psil_base = 0x1000, .enable_memcpy_support = true, .statictr_z_mask = GENMASK(11, 0), - .rchan_oes_offset = 0x2000, + .rchan_oes_offset = 0x200, }; static struct udma_match_data am654_mcu_data = { .psil_base = 0x6000, .enable_memcpy_support = false, .statictr_z_mask = GENMASK(11, 0), - .rchan_oes_offset = 0x2000, + .rchan_oes_offset = 0x200, }; static struct udma_match_data j721e_main_data = { diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 6262f6370c5dea9c7b7d83855e1e313ef9e50d0c..fcc08bbf6945f7ace4108f9537694fdba89f1a96 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3375,7 +3375,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) pvt->ops = &family_types[F17_M70H_CPUS].ops; break; } - /* fall through */ + fallthrough; case 0x18: fam_type = &family_types[F17_CPUS]; pvt->ops = &family_types[F17_CPUS].ops; diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c index da60c29468a7cf48575ed1d8000963732ceb5728..54ebc8afc6b1bb2de7c4aecb94734816c38651f0 100644 --- a/drivers/edac/ghes_edac.c +++ b/drivers/edac/ghes_edac.c @@ -55,6 +55,8 @@ static DEFINE_SPINLOCK(ghes_lock); static bool __read_mostly force_load; module_param(force_load, bool, 0); +static bool system_scanned; + /* Memory Device - Type 17 of SMBIOS spec */ struct memdev_dmi_entry { u8 type; @@ -225,14 +227,12 @@ static void enumerate_dimms(const struct dmi_header *dh, void *arg) static void ghes_scan_system(void) { - static bool scanned; - - if (scanned) + if (system_scanned) return; dmi_walk(enumerate_dimms, &ghes_hw); - scanned = true; + system_scanned = true; } void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) @@ -631,6 +631,8 @@ void ghes_edac_unregister(struct ghes *ghes) mutex_lock(&ghes_reg_mutex); + system_scanned = false; + if (!refcount_dec_and_test(&ghes_refcount)) goto unlock; diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 
5860ca41185cf1830d3aa2408f6fb36b5f7a29bc..2acd9f9284a260ac766134885b359b883734e89b 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -1710,9 +1710,9 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, if (uncorrected_error) { core_err_cnt = 1; if (ripv) - tp_event = HW_EVENT_ERR_FATAL; - else tp_event = HW_EVENT_ERR_UNCORRECTED; + else + tp_event = HW_EVENT_ERR_FATAL; } else { tp_event = HW_EVENT_ERR_CORRECTED; } diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index fd363746f5b03aa9b606ab505d2d09dbc2087656..928f63a374c78cdc98d89ceefa5e543ce368351a 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -198,7 +198,7 @@ static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *na switch (sz) { case 8: ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4)); - /* fall through */ + fallthrough; case 4: ret |= _apl_rd_reg(port, off, op, (u32 *)data); pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name, @@ -1155,7 +1155,7 @@ static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m, u32 optypenum = GET_BITFIELD(m->status, 4, 6); int rc; - tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) : + tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) : HW_EVENT_ERR_CORRECTED; /* diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index d414698ca324213688f946aeb18d8075975a050f..c5ab634cb6a49acf129197d5d560fdc2a46653d8 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -2982,9 +2982,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, if (uncorrected_error) { core_err_cnt = 1; if (ripv) { - tp_event = HW_EVENT_ERR_FATAL; - } else { tp_event = HW_EVENT_ERR_UNCORRECTED; + } else { + tp_event = HW_EVENT_ERR_FATAL; } } else { tp_event = HW_EVENT_ERR_CORRECTED; diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 6d8d6dc626bfeec73378c64ffebc3db27902103a..2b4ce8e5ac2fa6e48721cea55e878784df57fdf4 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -493,9 +493,9 @@ static void skx_mce_output_error(struct mem_ctl_info *mci, if (uncorrected_error) { core_err_cnt = 1; if (ripv) { - tp_event = HW_EVENT_ERR_FATAL; - } else { tp_event = HW_EVENT_ERR_UNCORRECTED; + } else { + tp_event = HW_EVENT_ERR_FATAL; } } else { tp_event = HW_EVENT_ERR_CORRECTED; diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index b785e936244f530eafc35ed1b804822eaf99ef6a..80db43a22069802992f0b01214df6564acbd662a 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -957,7 +957,7 @@ static void set_broadcast_channel(struct fw_device *device, int generation) device->bc_implemented = BC_IMPLEMENTED; break; } - /* else, fall through - to case address error */ + fallthrough; /* to case address error */ case RCODE_ADDRESS_ERROR: device->bc_implemented = BC_UNIMPLEMENTED; } diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 185b0b78b3d68c8d9e8b8135d3841a728602b109..af70e74f9a7e1ad21a54ee45fa968b063eadc04d 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c @@ -277,7 +277,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, if ((data[0] & bit) == (data[1] & bit)) continue; - /* fall through - It's a 1394-1995 IRM, retry. 
*/ + fallthrough; /* It's a 1394-1995 IRM, retry */ default: if (retry) { retry--; diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index 94a13fca82673b4036052378b398a239a0c57ace..ec68ed27b0a5f5c72091ea5ae56c5b74088672f2 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -54,7 +54,7 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count) switch (port_type) { case SELFID_PORT_CHILD: (*child_port_count)++; - /* fall through */ + fallthrough; case SELFID_PORT_PARENT: case SELFID_PORT_NCONN: (*total_port_count)++; diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 439d918bbaafaafcbe6cf38dda01b856d66d5f42..ac487c96bb717f9e76f2a6d092c6ff0af8515dd2 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -1097,14 +1097,14 @@ static void handle_registers(struct fw_card *card, struct fw_request *request, rcode = RCODE_ADDRESS_ERROR; break; } - /* else fall through */ + fallthrough; case CSR_NODE_IDS: /* * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges */ - /* fall through */ + fallthrough; case CSR_STATE_CLEAR: case CSR_STATE_SET: diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 7dde21b18b04c672516ceb14aaa1cc606ff17bcc..020cb15a4d8fccf3705616544275df560c8923a5 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -1495,7 +1495,7 @@ static int handle_at_packet(struct context *context, packet->ack = RCODE_GENERATION; break; } - /* fall through */ + fallthrough; default: packet->ack = RCODE_SEND_ERROR; @@ -3054,7 +3054,7 @@ static int ohci_start_iso(struct fw_iso_context *base, case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; - /* fall through */ + fallthrough; case FW_ISO_CONTEXT_RECEIVE: index = ctx - ohci->ir_context_list; match = (tags << 28) | (sync << 8) | ctx->base.channel; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index fdd1db025dbfda2c0a4e5f63b9d3e24fa95cfeb9..3aa07c3b5136979d8bcff667dae07cdb12265a14 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -381,6 +381,7 @@ static int __init efisubsys_init(void) efi_kobj = kobject_create_and_add("efi", firmware_kobj); if (!efi_kobj) { pr_err("efi: Firmware registration failed.\n"); + destroy_workqueue(efi_rts_wq); return -ENOMEM; } @@ -424,6 +425,7 @@ static int __init efisubsys_init(void) generic_ops_unregister(); err_put: kobject_put(efi_kobj); + destroy_workqueue(efi_rts_wq); return error; } diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c index e97a9c9d010c82d67951f81b78807c661ec54594..21ae0c48232a118bac79cc4a7783eb30700b4478 100644 --- a/drivers/firmware/efi/embedded-firmware.c +++ b/drivers/firmware/efi/embedded-firmware.c @@ -16,9 +16,9 @@ /* Exported for use by lib/test_firmware.c only */ LIST_HEAD(efi_embedded_fw_list); -EXPORT_SYMBOL_GPL(efi_embedded_fw_list); - -static bool checked_for_fw; +EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, TEST_FIRMWARE); +bool efi_embedded_fw_checked; +EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, TEST_FIRMWARE); static const struct dmi_system_id * const embedded_fw_table[] = { #ifdef CONFIG_TOUCHSCREEN_DMI @@ -116,14 +116,14 @@ void __init efi_check_for_embedded_firmwares(void) } } - checked_for_fw = true; + efi_embedded_fw_checked = true; } int efi_get_embedded_fw(const char 
*name, const u8 **data, size_t *size) { struct efi_embedded_fw *iter, *fw = NULL; - if (!checked_for_fw) { + if (!efi_embedded_fw_checked) { pr_warn("Warning %s called while we did not check for embedded fw\n", __func__); return -ENOENT; diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index 6bca70bbb43d0db1689fd57c7660c013d5c5e20e..f735db55adc037fff5b14f8326b197fb568b9fb2 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -187,20 +187,28 @@ int efi_printk(const char *fmt, ...) */ efi_status_t efi_parse_options(char const *cmdline) { - size_t len = strlen(cmdline) + 1; + size_t len; efi_status_t status; char *str, *buf; + if (!cmdline) + return EFI_SUCCESS; + + len = strnlen(cmdline, COMMAND_LINE_SIZE - 1) + 1; status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, len, (void **)&buf); if (status != EFI_SUCCESS) return status; - str = skip_spaces(memcpy(buf, cmdline, len)); + memcpy(buf, cmdline, len - 1); + buf[len - 1] = '\0'; + str = skip_spaces(buf); while (*str) { char *param, *val; str = next_arg(str, &param, &val); + if (!val && !strcmp(param, "--")) + break; if (!strcmp(param, "nokaslr")) { efi_nokaslr = true; diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 53cee17d011584f5621e3ab42ed7d094cbba9934..722af9ee53d65d26fd72a059cc661536d5a0a97a 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -64,22 +64,6 @@ struct ti_sci_xfers_info { spinlock_t xfer_lock; }; -/** - * struct ti_sci_rm_type_map - Structure representing TISCI Resource - * management representation of dev_ids. - * @dev_id: TISCI device ID - * @type: Corresponding id as identified by TISCI RM. - * - * Note: This is used only as a work around for using RM range apis - * for AM654 SoC. For future SoCs dev_id will be used as type - * for RM range APIs. In order to maintain ABI backward compatibility - * type is not being changed for AM654 SoC. - */ -struct ti_sci_rm_type_map { - u32 dev_id; - u16 type; -}; - /** * struct ti_sci_desc - Description of SoC integration * @default_host_id: Host identifier representing the compute entity @@ -87,14 +71,12 @@ struct ti_sci_rm_type_map { * @max_msgs: Maximum number of messages that can be pending * simultaneously in the system * @max_msg_size: Maximum size of data per message that can be handled. - * @rm_type_map: RM resource type mapping structure. */ struct ti_sci_desc { u8 default_host_id; int max_rx_timeout_ms; int max_msgs; int max_msg_size; - struct ti_sci_rm_type_map *rm_type_map; }; /** @@ -1710,33 +1692,6 @@ static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle) return ret; } -static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id, - u16 *type) -{ - struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map; - bool found = false; - int i; - - /* If map is not provided then assume dev_id is used as type */ - if (!rm_type_map) { - *type = dev_id; - return 0; - } - - for (i = 0; rm_type_map[i].dev_id; i++) { - if (rm_type_map[i].dev_id == dev_id) { - *type = rm_type_map[i].type; - found = true; - break; - } - } - - if (!found) - return -EINVAL; - - return 0; -} - /** * ti_sci_get_resource_range - Helper to get a range of resources assigned * to a host.
Resource is uniquely identified by @@ -1760,7 +1715,6 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle, struct ti_sci_xfer *xfer; struct ti_sci_info *info; struct device *dev; - u16 type; int ret = 0; if (IS_ERR(handle)) @@ -1780,15 +1734,9 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle, return ret; } - ret = ti_sci_get_resource_type(info, dev_id, &type); - if (ret) { - dev_err(dev, "rm type lookup failed for %u\n", dev_id); - goto fail; - } - req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf; req->secondary_host = s_host; - req->type = type & MSG_RM_RESOURCE_TYPE_MASK; + req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK; req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK; ret = ti_sci_do_xfer(info, xfer); @@ -3260,61 +3208,50 @@ u32 ti_sci_get_num_resources(struct ti_sci_resource *res) EXPORT_SYMBOL_GPL(ti_sci_get_num_resources); /** - * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device + * devm_ti_sci_get_resource_sets() - Get TISCI resources assigned to a device * @handle: TISCI handle * @dev: Device pointer to which the resource is assigned * @dev_id: TISCI device id to which the resource is assigned - * @of_prop: property name by which the resource are represented + * @sub_types: Array of sub_types assigned corresponding to device + * @sets: Number of sub_types * * Return: Pointer to ti_sci_resource if all went well else appropriate * error pointer. */ -struct ti_sci_resource * -devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, - struct device *dev, u32 dev_id, char *of_prop) +static struct ti_sci_resource * +devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle, + struct device *dev, u32 dev_id, u32 *sub_types, + u32 sets) { struct ti_sci_resource *res; bool valid_set = false; - u32 resource_subtype; int i, ret; res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); if (!res) return ERR_PTR(-ENOMEM); - ret = of_property_count_elems_of_size(dev_of_node(dev), of_prop, - sizeof(u32)); - if (ret < 0) { - dev_err(dev, "%s resource type ids not available\n", of_prop); - return ERR_PTR(ret); - } - res->sets = ret; - + res->sets = sets; res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc), GFP_KERNEL); if (!res->desc) return ERR_PTR(-ENOMEM); for (i = 0; i < res->sets; i++) { - ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i, - &resource_subtype); - if (ret) - return ERR_PTR(-EINVAL); - ret = handle->ops.rm_core_ops.get_range(handle, dev_id, - resource_subtype, + sub_types[i], &res->desc[i].start, &res->desc[i].num); if (ret) { dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n", - dev_id, resource_subtype); + dev_id, sub_types[i]); res->desc[i].start = 0; res->desc[i].num = 0; continue; } dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n", - dev_id, resource_subtype, res->desc[i].start, + dev_id, sub_types[i], res->desc[i].start, res->desc[i].num); valid_set = true; @@ -3332,6 +3269,62 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, return ERR_PTR(-EINVAL); } +/** + * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device + * @handle: TISCI handle + * @dev: Device pointer to which the resource is assigned + * @dev_id: TISCI device id to which the resource is assigned + * @of_prop: property name by which the resources are represented + * + * Return: Pointer to ti_sci_resource if all went well else appropriate + * error pointer.
+ */ +struct ti_sci_resource * +devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, + struct device *dev, u32 dev_id, char *of_prop) +{ + struct ti_sci_resource *res; + u32 *sub_types; + int sets; + + sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop, + sizeof(u32)); + if (sets < 0) { + dev_err(dev, "%s resource type ids not available\n", of_prop); + return ERR_PTR(sets); + } + + sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL); + if (!sub_types) + return ERR_PTR(-ENOMEM); + + of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets); + res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types, + sets); + + kfree(sub_types); + return res; +} +EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource); + +/** + * devm_ti_sci_get_resource() - Get a resource range assigned to the device + * @handle: TISCI handle + * @dev: Device pointer to which the resource is assigned + * @dev_id: TISCI device id to which the resource is assigned + * @sub_type: TISCI resource subtype representing the resource. + * + * Return: Pointer to ti_sci_resource if all went well else appropriate + * error pointer. + */ +struct ti_sci_resource * +devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, + u32 dev_id, u32 sub_type) +{ + return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1); +} +EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource); + static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, void *cmd) { @@ -3352,17 +3345,6 @@ static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = { /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ .max_msgs = 20, .max_msg_size = 64, - .rm_type_map = NULL, -}; - -static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = { - {.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */ - {.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */ - {.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */ - {.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */ - {.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */ - {.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */ - {.dev_id = 0, .type = 0x000}, /* end of table */ }; /* Description for AM654 */ @@ -3373,7 +3355,6 @@ static const struct ti_sci_desc ti_sci_pmmc_am654_desc = { /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages!
*/ .max_msgs = 20, .max_msg_size = 60, - .rm_type_map = ti_sci_am654_rm_type_map, }; static const struct of_device_id ti_sci_of_match[] = { diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c index d16645c1d8d9de447d148d0d7b2851fac7583ae4..3aa45934d60c4184a5159894d2c1631c37f427d7 100644 --- a/drivers/gpio/gpio-aspeed-sgpio.c +++ b/drivers/gpio/gpio-aspeed-sgpio.c @@ -303,16 +303,16 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type) switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_BOTH: type2 |= bit; - /* fall through */ + fallthrough; case IRQ_TYPE_EDGE_RISING: type0 |= bit; - /* fall through */ + fallthrough; case IRQ_TYPE_EDGE_FALLING: handler = handle_edge_irq; break; case IRQ_TYPE_LEVEL_HIGH: type0 |= bit; - /* fall through */ + fallthrough; case IRQ_TYPE_LEVEL_LOW: type1 |= bit; handler = handle_level_irq; diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index 879db23d84549313bde94846e29450d9bf180432..bf08b4561f3616b53cd957231fffa1486dfefbe9 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -611,16 +611,16 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type) switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_BOTH: type2 |= bit; - /* fall through */ + fallthrough; case IRQ_TYPE_EDGE_RISING: type0 |= bit; - /* fall through */ + fallthrough; case IRQ_TYPE_EDGE_FALLING: handler = handle_edge_irq; break; case IRQ_TYPE_LEVEL_HIGH: type0 |= bit; - /* fall through */ + fallthrough; case IRQ_TYPE_LEVEL_LOW: type1 |= bit; handler = handle_level_irq; diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c index 53fae02c40ad8299c7230e6792771924ce991314..d5359341cc6b2f6c4444ae1aec4cc08cdef0758c 100644 --- a/drivers/gpio/gpio-ath79.c +++ b/drivers/gpio/gpio-ath79.c @@ -129,7 +129,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data, case IRQ_TYPE_LEVEL_HIGH: polarity |= mask; - /* fall through */ + fallthrough; case IRQ_TYPE_LEVEL_LOW: type |= mask; break; diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index 8c97577740100914242bbf5cdc56aca8ca61163a..ad61daf6c2125f9a54cc6696f8f3add5818da96e 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c @@ -617,14 +617,12 @@ static int sprd_eic_probe(struct platform_device *pdev) sprd_eic->chip.free = sprd_eic_free; sprd_eic->chip.set_config = sprd_eic_set_config; sprd_eic->chip.set = sprd_eic_set; - /* fall-through */ + fallthrough; case SPRD_EIC_ASYNC: - /* fall-through */ case SPRD_EIC_SYNC: sprd_eic->chip.get = sprd_eic_get; break; case SPRD_EIC_LATCH: - /* fall-through */ default: break; } diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 6c48809d0505b35bdb455eb23d96981cb28c8e04..b0155d6007c81a3ee3fc71fc73c309f299667876 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -308,7 +308,7 @@ static void stmpe_dbg_show_one(struct seq_file *s, if (ret < 0) return; edge_det = !!(ret & mask); - /* fall through */ + fallthrough; case STMPE1801: rise_reg = stmpe->regs[STMPE_IDX_GPRER_LSB + bank]; fall_reg = stmpe->regs[STMPE_IDX_GPFER_LSB + bank]; @@ -321,7 +321,7 @@ static void stmpe_dbg_show_one(struct seq_file *s, if (ret < 0) return; fall = !!(ret & mask); - /* fall through */ + fallthrough; case STMPE801: case STMPE1600: irqen_reg = stmpe->regs[STMPE_IDX_IEGPIOR_LSB + bank]; diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 
9276051663dade53683f3d1cd6065f949f3db537..54ca3c18b291cafe75021e3e0ae4437ea60bcac2 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -1264,7 +1264,7 @@ static int acpi_gpio_package_count(const union acpi_object *obj) switch (element->type) { case ACPI_TYPE_LOCAL_REFERENCE: element += 3; - /* Fallthrough */ + fallthrough; case ACPI_TYPE_INTEGER: element++; count++; diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 403ec3db29df50cc5ac38c4dc4bcbf619ca97c75..39976c7b100c97117d24d2ae856ffbf4912898b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -30,7 +30,7 @@ FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME) ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_PATH)/include \ -I$(FULL_AMD_PATH)/amdgpu \ - -I$(FULL_AMD_PATH)/powerplay/inc \ + -I$(FULL_AMD_PATH)/pm/inc \ -I$(FULL_AMD_PATH)/acp/include \ -I$(FULL_AMD_DISPLAY_PATH) \ -I$(FULL_AMD_DISPLAY_PATH)/include \ @@ -47,7 +47,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \ amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \ amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \ - amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \ + atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \ atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ @@ -55,15 +55,15 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \ amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \ - amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o + amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o # add asic specific block -amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ +amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \ dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o -amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o \ +amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \ uvd_v3_1.o amdgpu-y += \ @@ -85,7 +85,7 @@ amdgpu-y += \ # add UMC block amdgpu-y += \ - umc_v6_1.o umc_v6_0.o + umc_v6_1.o umc_v6_0.o umc_v8_7.o # add IH block amdgpu-y += \ @@ -105,10 +105,6 @@ amdgpu-y += \ psp_v11_0.o \ psp_v12_0.o -# add SMC block -amdgpu-y += \ - amdgpu_dpm.o - # add DCE block amdgpu-y += \ dce_v10_0.o \ @@ -212,7 +208,7 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_mn.o -include $(FULL_AMD_PATH)/powerplay/Makefile +include $(FULL_AMD_PATH)/pm/Makefile amdgpu-y += $(AMD_POWERPLAY_FILES) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 327a0daf4a1dce932d29ac3ee6b0b2a9d51b4ad4..4009d2e30727556b0ef4a7418b5cfb7d7afa9b5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -178,6 +178,7 @@ extern uint amdgpu_dm_abm_level; extern struct amdgpu_mgpu_info mgpu_info; extern int amdgpu_ras_enable; extern uint amdgpu_ras_mask; +extern int amdgpu_bad_page_threshold; extern int amdgpu_async_gfx_ring; extern int amdgpu_mcbp; extern int amdgpu_discovery; @@ -187,9 +188,11 @@ extern int 
amdgpu_force_asic_type; #ifdef CONFIG_HSA_AMD extern int sched_policy; extern bool debug_evictions; +extern bool no_system_mem_limit; #else static const int sched_policy = KFD_SCHED_POLICY_HWS; static const bool debug_evictions; /* = false */ +static const bool no_system_mem_limit; #endif extern int amdgpu_tmz; @@ -201,6 +204,7 @@ extern int amdgpu_si_support; #ifdef CONFIG_DRM_AMDGPU_CIK extern int amdgpu_cik_support; #endif +extern int amdgpu_num_kcq; #define AMDGPU_VM_MAX_NUM_CTX 4096 #define AMDGPU_SG_THRESHOLD (256*1024*1024) @@ -212,6 +216,8 @@ extern int amdgpu_cik_support; #define AMDGPUFB_CONN_LIMIT 4 #define AMDGPU_BIOS_NUM_SCRATCH 16 +#define AMDGPU_VBIOS_VGA_ALLOCATION (9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */ + /* hard reset data */ #define AMDGPU_ASIC_RESET_DATA 0x39d5e86b @@ -245,6 +251,7 @@ struct amdgpu_fpriv; struct amdgpu_bo_va_mapping; struct amdgpu_atif; struct kfd_vm_fault_info; +struct amdgpu_hive_info; enum amdgpu_cp_irq { AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0, @@ -611,6 +618,8 @@ struct amdgpu_asic_funcs { uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev); /* device supports BACO */ bool (*supports_baco)(struct amdgpu_device *adev); + /* pre asic_init quirks */ + void (*pre_asic_init)(struct amdgpu_device *adev); }; /* @@ -647,16 +656,6 @@ struct amdgpu_atcs { struct amdgpu_atcs_functions functions; }; -/* - * Firmware VRAM reservation - */ -struct amdgpu_fw_vram_usage { - u64 start_offset; - u64 size; - struct amdgpu_bo *reserved_bo; - void *va; -}; - /* * CGS */ @@ -725,13 +724,13 @@ struct amd_powerplay { #define AMDGPU_MAX_DF_PERFMONS 4 struct amdgpu_device { struct device *dev; - struct drm_device *ddev; struct pci_dev *pdev; + struct drm_device ddev; #ifdef CONFIG_DRM_AMD_ACP struct amdgpu_acp acp; #endif - + struct amdgpu_hive_info *hive; /* ASIC */ enum amd_asic_type asic_type; uint32_t family; @@ -765,7 +764,6 @@ struct amdgpu_device { bool is_atom_fw; uint8_t *bios; uint32_t bios_size; - struct amdgpu_bo *stolen_vga_memory; uint32_t bios_scratch_reg_offset; uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; @@ -917,11 +915,6 @@ struct amdgpu_device { /* display related functionality */ struct amdgpu_display_manager dm; - /* discovery */ - uint8_t *discovery_bin; - uint32_t discovery_tmr_size; - struct amdgpu_bo *discovery_memory; - /* mes */ bool enable_mes; struct amdgpu_mes mes; @@ -946,8 +939,6 @@ struct amdgpu_device { struct delayed_work delayed_init_work; struct amdgpu_virt virt; - /* firmware VRAM reservation */ - struct amdgpu_fw_vram_usage fw_vram_usage; /* link all shadow bo */ struct list_head shadow_list; @@ -961,9 +952,9 @@ struct amdgpu_device { bool in_suspend; bool in_hibernate; - bool in_gpu_reset; + atomic_t in_gpu_reset; enum pp_mp1_state mp1_state; - struct mutex lock_reset; + struct rw_semaphore reset_sem; struct amdgpu_doorbell_index doorbell_index; struct mutex notifier_lock; @@ -995,16 +986,25 @@ struct amdgpu_device { atomic_t throttling_logging_enabled; struct ratelimit_state throttling_logging_rs; + uint32_t ras_features; }; +static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) +{ + return container_of(ddev, struct amdgpu_device, ddev); +} + +static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev) +{ + return &adev->ddev; +} + static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) { return container_of(bdev, struct amdgpu_device, mman.bdev); } int amdgpu_device_init(struct amdgpu_device *adev, - struct drm_device *ddev, - struct 
pci_dev *pdev, uint32_t flags); void amdgpu_device_fini(struct amdgpu_device *adev); int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); @@ -1141,10 +1141,12 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev)) #define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev))) #define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev)) +#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev)) #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter)); /* Common functions */ +bool amdgpu_device_has_job_running(struct amdgpu_device *adev); bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev); int amdgpu_device_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job); @@ -1194,7 +1196,7 @@ static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; } extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; extern const int amdgpu_max_kms_ioctl; -int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags); +int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags); void amdgpu_driver_unload_kms(struct drm_device *dev); void amdgpu_driver_lastclose_kms(struct drm_device *dev); int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); @@ -1278,4 +1280,8 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev) return adev->gmc.tmz_enabled; } +static inline int amdgpu_in_reset(struct amdgpu_device *adev) +{ + return atomic_read(&adev->in_gpu_reset); +} #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 12247a32f9ef94a1c3a23e81f16341143314a218..d3e51d361179e47d84b25fb737ba311bd824ff6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -136,9 +136,7 @@ static int acp_poweroff(struct generic_pm_domain *genpd) * 2. power off the acp tiles * 3. check and enter ulv state */ - if (adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); } return 0; } @@ -157,8 +155,7 @@ static int acp_poweron(struct generic_pm_domain *genpd) * 2. turn on acp clock * 3. 
power on acp tiles */ - if (adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); } return 0; } @@ -529,9 +526,7 @@ static int acp_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_PG_STATE_GATE); - if (adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable); + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 913c8f0513bd3cee35fb7ad95b9256bdf5219177..4a93b880c6bfb2d0de29c02688d327912eac8e76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -463,11 +463,11 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, if (req.pending & ATIF_DGPU_DISPLAY_EVENT) { if (adev->flags & AMD_IS_PX) { - pm_runtime_get_sync(adev->ddev->dev); + pm_runtime_get_sync(adev_to_drm(adev)->dev); /* Just fire off a uevent and let userspace tell us what to do */ - drm_helper_hpd_irq_event(adev->ddev); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + drm_helper_hpd_irq_event(adev_to_drm(adev)); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); } } /* TODO: check other events */ @@ -817,7 +817,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) struct drm_encoder *tmp; /* Find the encoder controlling the brightness */ - list_for_each_entry(tmp, &adev->ddev->mode_config.encoder_list, + list_for_each_entry(tmp, &adev_to_drm(adev)->mode_config.encoder_list, head) { struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 478f67498a1730d974fe9215ca148452ca5d248b..edff1b7f282a755b97d22e3eb13d2b43ab12259e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -119,7 +119,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) .gpuvm_size = min(adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT, AMDGPU_GMC_HOLE_START), - .drm_render_minor = adev->ddev->render->index, + .drm_render_minor = adev_to_drm(adev)->render->index, .sdma_doorbell_idx = adev->doorbell_index.sdma_engine, }; @@ -160,7 +160,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) adev->doorbell_index.last_non_cp; } - kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources); + kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources); } } @@ -479,11 +479,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, goto out_put; obj = dma_buf->priv; - if (obj->dev->driver != adev->ddev->driver) + if (obj->dev->driver != adev_to_drm(adev)->driver) /* Can't handle buffers from different drivers */ goto out_put; - adev = obj->dev->dev_private; + adev = drm_to_adev(obj->dev); bo = gem_to_amdgpu_bo(obj); if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT))) @@ -613,6 +613,7 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, job->vmid = vmid; ret = amdgpu_ib_schedule(ring, 1, ib, job, &f); + if (ret) { DRM_ERROR("amdgpu: failed to schedule IB.\n"); goto err_ib_sched; @@ -756,4 +757,8 @@ void 
kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) { } + +void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask) +{ +} #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index ffe149aafc39330f07c57cff47ace2aa5dd22e35..a10507ecb75082f531f5e6c53808067ca4944afa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -270,5 +270,6 @@ int kgd2kfd_resume_mm(struct mm_struct *mm); int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, struct dma_fence *fence); void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd); +void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask); #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 35d4a5ab0228085936d4b92911377de42db5e3b7..1afa8f122e7d3b261115a5c628e50975d4490bdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -283,22 +283,6 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) -{ - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { - pr_err("trying to set page table base for wrong VMID %u\n", - vmid); - return; - } - - mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base); - - gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); -} - const struct kfd2kgd_calls arcturus_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, @@ -317,7 +301,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { .wave_control_execute = kgd_gfx_v9_wave_control_execute, .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, - .set_vm_context_page_table_base = kgd_set_vm_context_page_table_base, - .get_hive_id = amdgpu_amdkfd_get_hive_id, + kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, + .set_vm_context_page_table_base = + kgd_gfx_v9_set_vm_context_page_table_base, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index bf927f432506dc2cedac50fa387f15d6589d6b16..df0aab0fc67e635df79cf8ae21a1186fbefbfe94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -542,7 +542,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t temp; struct v10_compute_mqd *m = get_mqd(mqd); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EIO; #if 0 @@ -776,6 +776,4 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = set_vm_context_page_table_base, - .get_hive_id = amdgpu_amdkfd_get_hive_id, - .get_unique_id = amdgpu_amdkfd_get_unique_id, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 7e59e473a190103e82568ffd72b20d5526a838fe..5b38f848b77296d5730ad4affaddf7cc30f11bdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -152,7 +152,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, dev_warn(adev->dev, "Invalid sdma engine id (%d), using engine id 0\n", engine_id); - /* fall through */ + fallthrough; case 0: sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; @@ -822,7 +822,6 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { .address_watch_get_offset = address_watch_get_offset_v10_3, .get_atc_vmid_pasid_mapping_info = NULL, .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3, - .get_hive_id = amdgpu_amdkfd_get_hive_id, #if 0 .enable_debug_trap = enable_debug_trap_v10_3, .disable_debug_trap = disable_debug_trap_v10_3, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 744366c7ee85d33df316c9cc1586271846ea239d..275f20399373ff2eeec2371a22156f35fd92edcc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -423,7 +423,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, unsigned long flags, end_jiffies; int retry; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EIO; acquire_queue(kgd, pipe_id, queue_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index feab4cc6e836765eb15d15a77715ddfd82b3d87e..4997189d8b36869f6aa8faaf9d01269fe2cab8a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -419,7 +419,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, int retry; struct vi_mqd *m = get_mqd(mqd); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EIO; acquire_queue(kgd, pipe_id, queue_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index c7fd0c47b254539028f12e0f0280d84165905db5..e6aede72519784ec8c432236a2beae301ddf91fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -195,19 +195,32 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t sdma_engine_reg_base[2] = { - SOC15_REG_OFFSET(SDMA0, 0, - mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA1, 0, - mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL - }; - uint32_t retval = sdma_engine_reg_base[engine_id] + uint32_t sdma_engine_reg_base = 0; + uint32_t sdma_rlc_reg_offset; + + switch (engine_id) { + default: + dev_warn(adev->dev, + "Invalid sdma engine id (%d), using engine id 0\n", + engine_id); + fallthrough; + case 0: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; + break; + case 1: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0, + mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; + break; + } + + sdma_rlc_reg_offset = sdma_engine_reg_base + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, - queue_id, retval); + queue_id, sdma_rlc_reg_offset); - return retval; + return sdma_rlc_reg_offset; } static inline struct v9_mqd *get_mqd(void *mqd) @@ -539,7 +552,7 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t temp; struct v9_mqd *m = get_mqd(mqd); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EIO; 
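/*
 * adev->in_gpu_reset is now an atomic_t read through the amdgpu_in_reset()
 * helper added to amdgpu.h earlier in this series, so the gfx_v7/v8/v9/v10
 * hqd_destroy paths converted in these hunks all bail out with -EIO in the
 * same way while a GPU reset is in flight.
 */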
acquire_queue(kgd, pipe_id, queue_id); @@ -677,7 +690,7 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, return 0; } -static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, +void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -688,7 +701,7 @@ static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, return; } - mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); + adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); } @@ -713,6 +726,4 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, - .get_hive_id = amdgpu_amdkfd_get_hive_id, - .get_unique_id = amdgpu_amdkfd_get_unique_id, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index aedf67d57449caef06227d9d0e70bda29bbbe45d..f098e88d3a0d379190c8b486b43d162cb88c1738 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -60,3 +60,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, uint8_t vmid, uint16_t *p_pasid); + +void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, + uint32_t vmid, uint64_t page_table_base); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index aa2b328c62024fca9372c36b075631e3a6d199b6..b6b821500d30a904f7be237a5acbcc20f7d69fe1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -148,8 +148,12 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, spin_lock(&kfd_mem_limit.mem_limit_lock); + if (kfd_mem_limit.system_mem_used + system_mem_needed > + kfd_mem_limit.max_system_mem_limit) + pr_debug("Set no_system_mem_limit=1 if using shared memory\n"); + if ((kfd_mem_limit.system_mem_used + system_mem_needed > - kfd_mem_limit.max_system_mem_limit) || + kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) || (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > kfd_mem_limit.max_ttm_mem_limit) || (adev->kfd.vram_used + vram_needed > @@ -1668,7 +1672,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, return -EINVAL; obj = dma_buf->priv; - if (obj->dev->dev_private != adev) + if (drm_to_adev(obj->dev) != adev) /* Can't handle buffers from other devices */ return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 29f767e026e4995441de5161c7b977c553fbf492..469352e2d6ecffff3dfb36480c0ae0c5f3c9eb8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -148,7 +148,7 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev) if (i2c.valid) { sprintf(stmp, "0x%x", i2c.i2c_id); - adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp); + adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp); } gpio = (ATOM_GPIO_I2C_ASSIGMENT *) ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT)); @@ -541,7 +541,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * } } - 
amdgpu_link_encoder_connector(adev->ddev); + amdgpu_link_encoder_connector(adev_to_drm(adev)); return true; } @@ -1786,9 +1786,9 @@ static int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev) (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION << ATOM_VRAM_OPERATION_FLAGS_SHIFT)) { /* Firmware request VRAM reservation for SR-IOV */ - adev->fw_vram_usage.start_offset = (start_addr & + adev->mman.fw_vram_usage_start_offset = (start_addr & (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10; - adev->fw_vram_usage.size = size << 10; + adev->mman.fw_vram_usage_size = size << 10; /* Use the default scratch size */ usage_bytes = 0; } else { @@ -1882,7 +1882,7 @@ static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) */ static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) { - struct amdgpu_device *adev = info->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(info->dev); WREG32(reg, val); } @@ -1898,7 +1898,7 @@ static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) */ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) { - struct amdgpu_device *adev = info->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(info->dev); uint32_t r; r = RREG32(reg); @@ -1916,7 +1916,7 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) */ static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) { - struct amdgpu_device *adev = info->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(info->dev); WREG32_IO(reg, val); } @@ -1932,7 +1932,7 @@ static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) */ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) { - struct amdgpu_device *adev = info->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(info->dev); uint32_t r; r = RREG32_IO(reg); @@ -1944,7 +1944,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); struct atom_context *ctx = adev->mode_info.atom_context; return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version); @@ -1995,7 +1995,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev) return -ENOMEM; adev->mode_info.atom_card_info = atom_card_info; - atom_card_info->dev = adev->ddev; + atom_card_info->dev = adev_to_drm(adev); atom_card_info->reg_read = cail_reg_read; atom_card_info->reg_write = cail_reg_write; /* needed for iio ops */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 1279053324f99e7e5f85cdaae69dbdfbb97592ea..17c010d0431fbaa99147c49c346f191031a09265 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -89,9 +89,9 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION << ATOM_VRAM_OPERATION_FLAGS_SHIFT)) { /* Firmware request VRAM reservation for SR-IOV */ - adev->fw_vram_usage.start_offset = (start_addr & + adev->mman.fw_vram_usage_start_offset = (start_addr & (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10; - adev->fw_vram_usage.size = size << 10; + adev->mman.fw_vram_usage_size = size << 10; /* Use the default scratch size */ usage_bytes = 0; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c 
index b1172d93c99c37db320573cd54ff69dbfcc7a80b..6333cada1e096229bdee93af87d7c7d6d17e91e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -417,26 +417,40 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) bool amdgpu_get_bios(struct amdgpu_device *adev) { - if (amdgpu_atrm_get_bios(adev)) + if (amdgpu_atrm_get_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from ATRM\n"); goto success; + } - if (amdgpu_acpi_vfct_bios(adev)) + if (amdgpu_acpi_vfct_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from VFCT\n"); goto success; + } - if (igp_read_bios_from_vram(adev)) + if (igp_read_bios_from_vram(adev)) { + dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n"); goto success; + } - if (amdgpu_read_bios(adev)) + if (amdgpu_read_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); goto success; + } - if (amdgpu_read_bios_from_rom(adev)) + if (amdgpu_read_bios_from_rom(adev)) { + dev_info(adev->dev, "Fetched VBIOS from ROM\n"); goto success; + } - if (amdgpu_read_disabled_bios(adev)) + if (amdgpu_read_disabled_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from disabled ROM BAR\n"); goto success; + } - if (amdgpu_read_platform_bios(adev)) + if (amdgpu_read_platform_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from platform\n"); goto success; + } DRM_ERROR("Unable to locate a BIOS ROM\n"); return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 4053597b3af2b6dd24085f3f13ac9897b4bf2322..15c45b2a398350eed6b9c6546fab6e1e951ed282 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -265,7 +265,7 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in, int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; union drm_amdgpu_bo_list *args = data; uint32_t handle = args->in.list_handle; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 93160a849af4b22e072d3d746159bf67321d6f88..65d1b23d7e746727358216f36f967a2163f2575c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -42,7 +42,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); /* bail if the connector does not have hpd pin, e.g., @@ -280,7 +280,7 @@ amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev) static void amdgpu_connector_get_edid(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->edid) @@ -464,7 +464,7 @@ static int amdgpu_connector_set_property(struct drm_connector *connector, uint64_t val) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; @@ -835,7 +835,7 @@ static enum drm_mode_status 
amdgpu_connector_vga_mode_valid(struct drm_connector struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); /* XXX check mode bandwidth */ @@ -942,7 +942,7 @@ static bool amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); enum drm_connector_status status; @@ -973,7 +973,7 @@ static enum drm_connector_status amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); const struct drm_encoder_helper_funcs *encoder_funcs; int r; @@ -1160,7 +1160,7 @@ static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); /* XXX check mode bandwidth */ @@ -1312,7 +1312,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector) bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if ((adev->clock.default_dispclk >= 53900) && amdgpu_connector_encoder_is_hbr2(connector)) { @@ -1326,7 +1326,7 @@ static enum drm_connector_status amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); enum drm_connector_status ret = connector_status_disconnected; struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv; @@ -1526,7 +1526,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, struct amdgpu_hpd *hpd, struct amdgpu_router *router) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index ecd051976bce8d82790e4a35ccd6cc0677765d93..12598a4b5c788f5473d08aad318e1aa1a0b35508 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1275,13 +1275,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, return r; } +static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser) +{ + int i; + + if (!trace_amdgpu_cs_enabled()) + return; + + for (i = 0; i < parser->job->num_ibs; i++) + trace_amdgpu_cs(parser, i); +} + int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); union drm_amdgpu_cs *cs = data; struct amdgpu_cs_parser parser = {}; bool reserved_buffers = false; - int i, r; + int r; if (amdgpu_ras_intr_triggered()) return -EHWPOISON; 
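The trace_amdgpu_cs_ibs() helper added above follows the usual tracepoint idiom: each TRACE_EVENT() also generates a trace_<event>_enabled() static-key test, so the whole per-IB loop over trace_amdgpu_cs() can be skipped while nothing is recording the event. A minimal sketch of that idiom — my_item, trace_my_item() and struct item_parser are hypothetical names for illustration, not part of this patch:

/* Gate a loop of trace calls behind the generated _enabled() test. */
struct item_parser {
	int num_items;
};

static void trace_all_items(struct item_parser *p)
{
	int i;

	/* Jump-label check: effectively free while the event is disabled. */
	if (!trace_my_item_enabled())
		return;

	for (i = 0; i < p->num_items; i++)
		trace_my_item(p, i);
}

The point of hoisting the check is that the loop counter, argument marshalling and per-iteration branch all disappear when the event is off, instead of being paid once per IB.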
@@ -1294,7 +1305,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_parser_init(&parser, data); if (r) { - DRM_ERROR("Failed to initialize parser %d!\n", r); + if (printk_ratelimit()) + DRM_ERROR("Failed to initialize parser %d!\n", r); goto out; } @@ -1319,8 +1331,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) reserved_buffers = true; - for (i = 0; i < parser.job->num_ibs; i++) - trace_amdgpu_cs(&parser, i); + trace_amdgpu_cs_ibs(&parser); r = amdgpu_cs_vm_handling(&parser); if (r) @@ -1421,7 +1432,7 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); union drm_amdgpu_fence_to_handle *info = data; struct dma_fence *fence; struct drm_syncobj *syncobj; @@ -1597,7 +1608,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev, int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); union drm_amdgpu_wait_fences *wait = data; uint32_t fence_count = wait->in.fence_count; struct drm_amdgpu_fence *fences_user; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 8842c55d4490b3fdb57ffda94a7cfee219bfc14d..c80d8339f58c4726999de96a0b4a53b422efa156 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -46,7 +46,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { static int amdgpu_ctx_priority_permit(struct drm_file *filp, enum drm_sched_priority priority) { - if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) + if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT) return -EINVAL; /* NORMAL and below are accessible by everyone */ @@ -65,7 +65,7 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp, static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio) { switch (prio) { - case DRM_SCHED_PRIORITY_HIGH_HW: + case DRM_SCHED_PRIORITY_HIGH: case DRM_SCHED_PRIORITY_KERNEL: return AMDGPU_GFX_PIPE_PRIO_HIGH; default: @@ -114,7 +114,11 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip, scheds = adev->gpu_sched[hw_ip][hw_prio].sched; num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds; - if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) { + /* disable load balance if the hw engine retains context among dependent jobs */ + if (hw_ip == AMDGPU_HW_IP_VCN_ENC || + hw_ip == AMDGPU_HW_IP_VCN_DEC || + hw_ip == AMDGPU_HW_IP_UVD_ENC || + hw_ip == AMDGPU_HW_IP_UVD) { sched = drm_sched_pick_best(scheds, num_scheds); scheds = &sched; num_scheds = 1; @@ -385,16 +389,15 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, enum drm_sched_priority priority; union drm_amdgpu_ctx *args = data; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; - r = 0; id = args->in.ctx_id; - priority = amdgpu_to_sched_priority(args->in.priority); + r = amdgpu_to_sched_priority(args->in.priority, &priority); /* For backwards compatibility reasons, we need to accept * ioctls with garbage in the priority field */ - if (priority == DRM_SCHED_PRIORITY_INVALID) + if (r == -EINVAL) priority = 
DRM_SCHED_PRIORITY_NORMAL; switch (args->in.op) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 193ffdb957b67f194aaf400dc4cc7d7f23a7b685..abe0c2729e1cfb3f10d74ecb5825d7f3525dfbf9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -34,6 +34,7 @@ #include "amdgpu_pm.h" #include "amdgpu_dm_debugfs.h" #include "amdgpu_ras.h" +#include "amdgpu_rap.h" /** * amdgpu_debugfs_add_files - Add simple debugfs entries @@ -68,8 +69,8 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev, adev->debugfs_count = i; #if defined(CONFIG_DEBUG_FS) drm_debugfs_create_files(files, nfiles, - adev->ddev->primary->debugfs_root, - adev->ddev->primary); + adev_to_drm(adev)->primary->debugfs_root, + adev_to_drm(adev)->primary); #endif return 0; } @@ -100,14 +101,18 @@ static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file) file->private_data = adev; - mutex_lock(&adev->lock_reset); + ret = down_read_killable(&adev->reset_sem); + if (ret) + return ret; + if (adev->autodump.dumping.done) { reinit_completion(&adev->autodump.dumping); ret = 0; } else { ret = -EBUSY; } - mutex_unlock(&adev->lock_reset); + + up_read(&adev->reset_sem); return ret; } @@ -126,7 +131,7 @@ static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_ poll_wait(file, &adev->autodump.gpu_hang, poll_table); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return POLLIN | POLLRDNORM | POLLWRNORM; return 0; @@ -146,7 +151,7 @@ static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev) init_waitqueue_head(&adev->autodump.gpu_hang); debugfs_create_file("amdgpu_autodump", 0600, - adev->ddev->primary->debugfs_root, + adev_to_drm(adev)->primary->debugfs_root, adev, &autodump_debug_fops); } @@ -222,23 +227,23 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, *pos &= (1UL << 22) - 1; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = amdgpu_virt_enable_access_debugfs(adev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; } @@ -287,8 +292,8 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, if (pm_pg_lock) mutex_unlock(&adev->pm.mutex); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return result; @@ -335,15 +340,15 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = 
amdgpu_virt_enable_access_debugfs(adev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -353,8 +358,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, value = RREG32_PCIE(*pos >> 2); r = put_user(value, (uint32_t *)buf); if (r) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -365,8 +370,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, size -= 4; } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return result; @@ -394,15 +399,15 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user if (size & 0x3 || *pos & 0x3) return -EINVAL; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = amdgpu_virt_enable_access_debugfs(adev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -411,8 +416,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user r = get_user(value, (uint32_t *)buf); if (r) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -425,8 +430,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user size -= 4; } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return result; @@ -454,15 +459,15 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = amdgpu_virt_enable_access_debugfs(adev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -472,8 +477,8 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, value = RREG32_DIDT(*pos >> 2); r = put_user(value, (uint32_t *)buf); if (r) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -484,8 +489,8 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, size -= 4; } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + 
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return result; @@ -513,15 +518,15 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user if (size & 0x3 || *pos & 0x3) return -EINVAL; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = amdgpu_virt_enable_access_debugfs(adev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -530,8 +535,8 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user r = get_user(value, (uint32_t *)buf); if (r) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -544,8 +549,8 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user size -= 4; } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return result; @@ -573,15 +578,15 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = amdgpu_virt_enable_access_debugfs(adev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -591,8 +596,8 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, value = RREG32_SMC(*pos); r = put_user(value, (uint32_t *)buf); if (r) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -603,8 +608,8 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, size -= 4; } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return result; @@ -632,15 +637,15 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * if (size & 0x3 || *pos & 0x3) return -EINVAL; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = amdgpu_virt_enable_access_debugfs(adev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -649,8 +654,8 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * r = get_user(value, (uint32_t *)buf); if (r) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + 
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; } @@ -663,8 +668,8 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * size -= 4; } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return result; @@ -791,22 +796,22 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, valuesize = sizeof(values); - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = amdgpu_virt_enable_access_debugfs(adev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) { amdgpu_virt_disable_access_debugfs(adev); @@ -873,15 +878,15 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, wave = (*pos & GENMASK_ULL(36, 31)) >> 31; simd = (*pos & GENMASK_ULL(44, 37)) >> 37; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = amdgpu_virt_enable_access_debugfs(adev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -896,8 +901,8 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (!x) { amdgpu_virt_disable_access_debugfs(adev); @@ -971,7 +976,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, if (!data) return -ENOMEM; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) goto err; @@ -994,8 +999,8 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); while (size) { uint32_t value; @@ -1017,7 +1022,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, return result; err: - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); kfree(data); return r; } @@ -1042,9 +1047,9 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu if (size & 0x3 || *pos & 0x3) return -EINVAL; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - 
pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -1053,8 +1058,8 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu r = get_user(value, (uint32_t *)buf); if (r) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -1066,8 +1071,8 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu size -= 4; } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return result; } @@ -1091,7 +1096,7 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) return r; @@ -1100,15 +1105,15 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, r = amdgpu_get_gfx_off_status(adev, &value); if (r) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = put_user(value, (uint32_t *)buf); if (r) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -1118,8 +1123,8 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, size -= 4; } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return result; } @@ -1211,7 +1216,7 @@ static const char *debugfs_regs_names[] = { */ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { - struct drm_minor *minor = adev->ddev->primary; + struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *ent, *root = minor->debugfs_root; unsigned int i; @@ -1231,17 +1236,19 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int r = 0, i; r = pm_runtime_get_sync(dev->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } /* Avoid accidently unparking the sched thread during GPU reset */ - mutex_lock(&adev->lock_reset); + r = down_read_killable(&adev->reset_sem); + if (r) + return r; /* hold on the scheduler */ for (i = 0; i < AMDGPU_MAX_RINGS; i++) { @@ -1268,7 +1275,7 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) kthread_unpark(ring->sched.thread); } - mutex_unlock(&adev->lock_reset); + up_read(&adev->reset_sem); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); @@ -1280,7 +1287,7 @@ static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct 
amdgpu_device *adev = drm_to_adev(dev); seq_write(m, adev->bios, adev->bios_size); return 0; @@ -1290,12 +1297,12 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int r; r = pm_runtime_get_sync(dev->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -1311,12 +1318,12 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int r; r = pm_runtime_get_sync(dev->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -1458,7 +1465,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) return -ENOMEM; /* Avoid accidently unparking the sched thread during GPU reset */ - mutex_lock(&adev->lock_reset); + r = down_read_killable(&adev->reset_sem); + if (r) + goto pro_end; /* stop the scheduler */ kthread_park(ring->sched.thread); @@ -1499,13 +1508,14 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) /* restart the scheduler */ kthread_unpark(ring->sched.thread); - mutex_unlock(&adev->lock_reset); + up_read(&adev->reset_sem); ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); +pro_end: kfree(fences); - return 0; + return r; } static int amdgpu_debugfs_sclk_set(void *data, u64 val) @@ -1517,9 +1527,9 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val) if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return -EINVAL; - ret = pm_runtime_get_sync(adev->ddev->dev); + ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (ret < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return ret; } @@ -1532,8 +1542,8 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val) return 0; } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (ret) return -EINVAL; @@ -1553,7 +1563,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) adev->debugfs_preempt = debugfs_create_file("amdgpu_preempt_ib", 0600, - adev->ddev->primary->debugfs_root, adev, + adev_to_drm(adev)->primary->debugfs_root, adev, &fops_ib_preempt); if (!(adev->debugfs_preempt)) { DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n"); @@ -1562,7 +1572,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) adev->smu.debugfs_sclk = debugfs_create_file("amdgpu_force_sclk", 0200, - adev->ddev->primary->debugfs_root, adev, + adev_to_drm(adev)->primary->debugfs_root, adev, &fops_sclk_set); if (!(adev->smu.debugfs_sclk)) { DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n"); @@ -1623,6 +1633,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) amdgpu_debugfs_autodump_init(adev); + amdgpu_rap_debugfs_init(adev); + return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list, ARRAY_SIZE(amdgpu_debugfs_list)); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 
4204cda680f5668d6d0c284e285e5852c3b17c22..f7307af76452171e79b24d862ee2b68597594230 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -132,7 +132,7 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev); return snprintf(buf, PAGE_SIZE, "%llu\n", cnt); @@ -157,7 +157,7 @@ static ssize_t amdgpu_device_get_product_name(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name); } @@ -179,7 +179,7 @@ static ssize_t amdgpu_device_get_product_number(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number); } @@ -201,7 +201,7 @@ static ssize_t amdgpu_device_get_serial_number(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial); } @@ -219,7 +219,7 @@ static DEVICE_ATTR(serial_number, S_IRUGO, */ bool amdgpu_device_supports_boco(struct drm_device *dev) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (adev->flags & AMD_IS_PX) return true; @@ -236,7 +236,7 @@ bool amdgpu_device_supports_boco(struct drm_device *dev) */ bool amdgpu_device_supports_baco(struct drm_device *dev) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); return amdgpu_asic_supports_baco(adev); } @@ -319,8 +319,12 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, { uint32_t ret; - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) - return amdgpu_kiq_rreg(adev, reg); + if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) && + down_read_trylock(&adev->reset_sem)) { + ret = amdgpu_kiq_rreg(adev, reg); + up_read(&adev->reset_sem); + return ret; + } if ((reg * 4) < adev->rmmio_size) ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); @@ -332,6 +336,7 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); } + trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret); return ret; } @@ -378,7 +383,9 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) BUG(); } -void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags) +static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, + uint32_t reg, uint32_t v, + uint32_t acc_flags) { trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); @@ -407,8 +414,12 @@ void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags) { - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && 
amdgpu_sriov_runtime(adev)) - return amdgpu_kiq_wreg(adev, reg, v); + if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) && + down_read_trylock(&adev->reset_sem)) { + amdgpu_kiq_wreg(adev, reg, v); + up_read(&adev->reset_sem); + return; + } amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags); } @@ -653,6 +664,20 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, BUG(); } +/** + * amdgpu_device_asic_init - Wrapper for atom asic_init + * + * @adev: amdgpu_device pointer + * + * Does any asic specific work and then calls atom asic init. + */ +static int amdgpu_device_asic_init(struct amdgpu_device *adev) +{ + amdgpu_asic_pre_asic_init(adev); + + return amdgpu_atom_asic_init(adev->mode_info.atom_context); +} + /** * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page * @@ -1199,6 +1224,11 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) amdgpu_gmc_tmz_set(adev); + if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) { + amdgpu_num_kcq = 8; + dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n"); + } + return 0; } @@ -1211,7 +1241,8 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) * Callback for the switcheroo driver. Suspends or resumes the * the asics before or after it is powered up using ACPI methods. */ -static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) +static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, + enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); int r; @@ -1504,7 +1535,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) adev->enable_virtual_display = false; if (amdgpu_virtual_display) { - struct drm_device *ddev = adev->ddev; + struct drm_device *ddev = adev_to_drm(adev); const char *pci_address_name = pci_name(ddev->pdev); char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname; @@ -1563,7 +1594,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) adev->firmware.gpu_info_fw = NULL; - if (adev->discovery_bin) { + if (adev->mman.discovery_bin) { amdgpu_discovery_get_gfx_info(adev); /* @@ -1935,7 +1966,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev) if (adev->ip_blocks[i].status.hw == true) break; - if (adev->in_gpu_reset || adev->in_suspend) { + if (amdgpu_in_reset(adev) || adev->in_suspend) { r = adev->ip_blocks[i].version->funcs->resume(adev); if (r) { DRM_ERROR("resume of IP block <%s> failed %d\n", @@ -2055,13 +2086,19 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) * it should be called after amdgpu_device_ip_hw_init_phase2 since * for some ASICs the RAS EEPROM code relies on SMU fully functioning * for I2C communication which only true at this point. - * recovery_init may fail, but it can free all resources allocated by - * itself and its failure should not stop amdgpu init process. + * + * amdgpu_ras_recovery_init may fail, but the caller only cares about + * failures caused by a bad GPU state and stops the amdgpu init process + * accordingly. For other failures it still releases all the resources + * and prints an error message rather than returning a negative value + * to the upper level. 
* * Note: theoretically, this should be called before all vram allocations * to protect retired page from abusing */ - amdgpu_ras_recovery_init(adev); + r = amdgpu_ras_recovery_init(adev); + if (r) + goto init_failed; if (adev->gmc.xgmi.num_physical_nodes > 1) amdgpu_xgmi_add_device(adev); @@ -2106,7 +2143,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) AMDGPU_RESET_MAGIC_NUM)) return true; - if (!adev->in_gpu_reset) + if (!amdgpu_in_reset(adev)) return false; /* @@ -2217,9 +2254,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void) gpu_ins = &(mgpu_info.gpu_ins[i]); adev = gpu_ins->adev; if (!(adev->flags & AMD_IS_APU) && - !gpu_ins->mgpu_fan_enabled && - adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->enable_mgpu_fan_boost) { + !gpu_ins->mgpu_fan_enabled) { ret = amdgpu_dpm_enable_mgpu_fan_boost(adev); if (ret) break; @@ -2574,17 +2609,16 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_IH, }; - for (i = 0; i < adev->num_ip_blocks; i++) - adev->ip_blocks[i].status.hw = false; - for (i = 0; i < ARRAY_SIZE(ip_order); i++) { int j; struct amdgpu_ip_block *block; - for (j = 0; j < adev->num_ip_blocks; j++) { - block = &adev->ip_blocks[j]; + block = &adev->ip_blocks[i]; + block->status.hw = false; - if (block->version->type != ip_order[i] || + for (j = 0; j < ARRAY_SIZE(ip_order); j++) { + + if (block->version->type != ip_order[j] || !block->status.valid) continue; @@ -2777,6 +2811,12 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) { switch (asic_type) { #if defined(CONFIG_DRM_AMD_DC) +#if defined(CONFIG_DRM_AMD_DC_SI) + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: +#endif case CHIP_BONAIRE: case CHIP_KAVERI: case CHIP_KABINI: @@ -2831,7 +2871,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) */ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display) return false; return amdgpu_device_asic_has_dc_support(adev->asic_type); @@ -2842,7 +2882,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) { struct amdgpu_device *adev = container_of(__work, struct amdgpu_device, xgmi_reset_work); - struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0); + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); /* It's a bug to not have a hive within this function */ if (WARN_ON(!hive)) @@ -2857,13 +2897,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { task_barrier_enter(&hive->tb); - adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev); + adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev)); if (adev->asic_reset_res) goto fail; task_barrier_exit(&hive->tb); - adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev); + adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev)); if (adev->asic_reset_res) goto fail; @@ -2879,7 +2919,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) fail: if (adev->asic_reset_res) DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", - adev->asic_reset_res, adev->ddev->unique); + adev->asic_reset_res, adev_to_drm(adev)->unique); + amdgpu_put_xgmi_hive(hive); } static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) @@ -2962,8 +3003,6 @@ static const struct attribute *amdgpu_dev_attributes[] = { * amdgpu_device_init 
- initialize the driver * * @adev: amdgpu_device pointer - * @ddev: drm dev pointer - * @pdev: pci dev pointer * @flags: driver flags * * Initializes the driver info and hw (all asics). @@ -2971,18 +3010,15 @@ static const struct attribute *amdgpu_dev_attributes[] = { * Called at driver startup. */ int amdgpu_device_init(struct amdgpu_device *adev, - struct drm_device *ddev, - struct pci_dev *pdev, uint32_t flags) { + struct drm_device *ddev = adev_to_drm(adev); + struct pci_dev *pdev = adev->pdev; int r, i; bool boco = false; u32 max_MBps; adev->shutdown = false; - adev->dev = &pdev->dev; - adev->ddev = ddev; - adev->pdev = pdev; adev->flags = flags; if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) @@ -3038,7 +3074,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->mn_lock); mutex_init(&adev->virt.vf_errors.lock); hash_init(adev->mn_hash); - mutex_init(&adev->lock_reset); + atomic_set(&adev->in_gpu_reset, 0); + init_rwsem(&adev->reset_sem); mutex_init(&adev->psp.mutex); mutex_init(&adev->notifier_lock); @@ -3188,7 +3225,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, goto failed; } DRM_INFO("GPU posting now...\n"); - r = amdgpu_atom_asic_init(adev->mode_info.atom_context); + r = amdgpu_device_asic_init(adev); if (r) { dev_err(adev->dev, "gpu post error!\n"); goto failed; @@ -3226,7 +3263,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, } /* init the mode config */ - drm_mode_config_init(adev->ddev); + drm_mode_config_init(adev_to_drm(adev)); r = amdgpu_device_ip_init(adev); if (r) { @@ -3352,9 +3389,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, */ void amdgpu_device_fini(struct amdgpu_device *adev) { - int r; - - DRM_INFO("amdgpu: finishing device.\n"); + dev_info(adev->dev, "amdgpu: finishing device.\n"); flush_delayed_work(&adev->delayed_init_work); adev->shutdown = true; @@ -3368,15 +3403,15 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized){ if (!amdgpu_device_has_dc_support(adev)) - drm_helper_force_disable_all(adev->ddev); + drm_helper_force_disable_all(adev_to_drm(adev)); else - drm_atomic_helper_shutdown(adev->ddev); + drm_atomic_helper_shutdown(adev_to_drm(adev)); } amdgpu_fence_driver_fini(adev); if (adev->pm_sysfs_en) amdgpu_pm_sysfs_fini(adev); amdgpu_fbdev_fini(adev); - r = amdgpu_device_ip_fini(adev); + amdgpu_device_ip_fini(adev); release_firmware(adev->firmware.gpu_info_fw); adev->firmware.gpu_info_fw = NULL; adev->accel_working = false; @@ -3394,7 +3429,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_has_atpx_dgpu_power_cntl()) && !pci_is_thunderbolt_attached(adev->pdev)) vga_switcheroo_unregister_client(adev->pdev); - if (amdgpu_device_supports_boco(adev->ddev)) + if (amdgpu_device_supports_boco(adev_to_drm(adev))) vga_switcheroo_fini_domain_pm_ops(adev->dev); vga_client_register(adev->pdev, NULL, NULL, NULL); if (adev->rio_mem) @@ -3410,7 +3445,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); if (IS_ENABLED(CONFIG_PERF_EVENTS)) amdgpu_pmu_fini(adev); - if (adev->discovery_bin) + if (adev->mman.discovery_bin) amdgpu_discovery_fini(adev); } @@ -3436,11 +3471,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) struct drm_connector_list_iter iter; int r; - if (dev == NULL || dev->dev_private == NULL) { - return -ENODEV; - } - - adev = dev->dev_private; + adev = drm_to_adev(dev); if (dev->switch_power_state == 
DRM_SWITCH_POWER_OFF) return 0; @@ -3528,7 +3559,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) { struct drm_connector *connector; struct drm_connector_list_iter iter; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_crtc *crtc; int r = 0; @@ -3537,14 +3568,14 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) /* post card */ if (amdgpu_device_need_post(adev)) { - r = amdgpu_atom_asic_init(adev->mode_info.atom_context); + r = amdgpu_device_asic_init(adev); if (r) - DRM_ERROR("amdgpu asic init failed\n"); + dev_err(adev->dev, "amdgpu asic init failed\n"); } r = amdgpu_device_ip_resume(adev); if (r) { - DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r); + dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); return r; } amdgpu_fence_driver_resume(adev); @@ -3568,7 +3599,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) if (r == 0) { r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); if (r != 0) - DRM_ERROR("Failed to pin cursor BO (%d)\n", r); + dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r); amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); amdgpu_bo_unreserve(aobj); } @@ -3658,7 +3689,7 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) adev->ip_blocks[i].status.hang = adev->ip_blocks[i].version->funcs->check_soft_reset(adev); if (adev->ip_blocks[i].status.hang) { - DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); + dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); asic_hang = true; } } @@ -3719,7 +3750,7 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { if (adev->ip_blocks[i].status.hang) { - DRM_INFO("Some block need full reset!\n"); + dev_info(adev->dev, "Some block need full reset!\n"); return true; } } @@ -3807,7 +3838,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) else tmo = msecs_to_jiffies(100); - DRM_INFO("recover vram bo from shadow start\n"); + dev_info(adev->dev, "recover vram bo from shadow start\n"); mutex_lock(&adev->shadow_list_lock); list_for_each_entry(shadow, &adev->shadow_list, shadow_list) { @@ -3843,11 +3874,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) dma_fence_put(fence); if (r < 0 || tmo <= 0) { - DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); + dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); return -EIO; } - DRM_INFO("recover vram bo from shadow done\n"); + dev_info(adev->dev, "recover vram bo from shadow done\n"); return 0; } @@ -3907,6 +3938,34 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, return r; } +/** + * amdgpu_device_has_job_running - check if there is any job in mirror list + * + * @adev: amdgpu device pointer + * + * check if there is any job in mirror list + */ +bool amdgpu_device_has_job_running(struct amdgpu_device *adev) +{ + int i; + struct drm_sched_job *job; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (!ring || !ring->sched.thread) + continue; + + spin_lock(&ring->sched.job_list_lock); + job = list_first_entry_or_null(&ring->sched.ring_mirror_list, + struct drm_sched_job, node); + spin_unlock(&ring->sched.job_list_lock); + if (job) + return true; + } + return false; +} + /** * 
amdgpu_device_should_recover_gpu - check if we should try GPU recovery * @@ -3918,7 +3977,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) { if (!amdgpu_device_ip_check_soft_reset(adev)) { - DRM_INFO("Timeout, but no hardware hang detected.\n"); + dev_info(adev->dev, "Timeout, but no hardware hang detected.\n"); return false; } @@ -3958,7 +4017,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) return true; disabled: - DRM_INFO("GPU recovery disabled.\n"); + dev_info(adev->dev, "GPU recovery disabled.\n"); return false; } @@ -3997,7 +4056,7 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, r = amdgpu_device_ip_soft_reset(adev); amdgpu_device_ip_post_soft_reset(adev); if (r || amdgpu_device_ip_check_soft_reset(adev)) { - DRM_INFO("soft reset failed, will fallback to full reset!\n"); + dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n"); need_full_reset = true; } } @@ -4033,8 +4092,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, r = amdgpu_asic_reset(tmp_adev); if (r) { - DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s", - r, tmp_adev->ddev->unique); + dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s", - r, adev_to_drm(tmp_adev)->unique); break; } } @@ -4066,8 +4125,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { if (need_full_reset) { /* post card */ - if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context)) - DRM_WARN("asic atom init failed!"); + if (amdgpu_device_asic_init(tmp_adev)) + dev_warn(tmp_adev->dev, "asic atom init failed!"); if (!r) { dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); @@ -4108,8 +4167,23 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, amdgpu_fbdev_set_suspend(tmp_adev, 0); - /* must succeed. */ - amdgpu_ras_resume(tmp_adev); + /* + * The GPU enters a bad state once the number of faulty pages + * detected by ECC reaches the threshold, and RAS recovery is + * scheduled next. So add a check here to break recovery if the + * count indeed exceeds the bad page threshold, and remind the + * user to retire this GPU or to set a bigger bad_page_threshold + * value before probing the driver again. + */ + if (!amdgpu_ras_check_err_threshold(tmp_adev)) { + /* must succeed. 
*/ + amdgpu_ras_resume(tmp_adev); + } else { + r = -EINVAL; + goto out; + } /* Update PSP FW topology after reset */ if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1) @@ -4117,7 +4191,6 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, } } - out: if (!r) { amdgpu_irq_gpu_reset_resume_helper(tmp_adev); @@ -4142,16 +4215,19 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, return r; } -static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock) +static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, + struct amdgpu_hive_info *hive) { - if (trylock) { - if (!mutex_trylock(&adev->lock_reset)) - return false; - } else - mutex_lock(&adev->lock_reset); + if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0) + return false; + + if (hive) { + down_write_nest_lock(&adev->reset_sem, &hive->hive_lock); + } else { + down_write(&adev->reset_sem); + } atomic_inc(&adev->gpu_reset_counter); - adev->in_gpu_reset = true; switch (amdgpu_asic_reset_method(adev)) { case AMD_RESET_METHOD_MODE1: adev->mp1_state = PP_MP1_STATE_SHUTDOWN; @@ -4171,8 +4247,8 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev) { amdgpu_vf_error_trans_all(adev); adev->mp1_state = PP_MP1_STATE_NONE; - adev->in_gpu_reset = false; - mutex_unlock(&adev->lock_reset); + atomic_set(&adev->in_gpu_reset, 0); + up_write(&adev->reset_sem); } static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) @@ -4282,12 +4358,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * We always reset all schedulers for device and all devices for XGMI * hive so that should take care of them too. */ - hive = amdgpu_get_xgmi_hive(adev, true); - if (hive && !mutex_trylock(&hive->reset_lock)) { - DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress", - job ? job->base.id : -1, hive->hive_id); - mutex_unlock(&hive->hive_lock); - return 0; + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) { + DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress", + job ? job->base.id : -1, hive->hive_id); + amdgpu_put_xgmi_hive(hive); + return 0; + } + mutex_lock(&hive->hive_lock); } /* @@ -4309,11 +4388,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, /* block all schedulers and reset given job's ring */ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { - if (!amdgpu_device_lock_adev(tmp_adev, !hive)) { - DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress", + if (!amdgpu_device_lock_adev(tmp_adev, hive)) { + dev_info(tmp_adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress", job ? 
job->base.id : -1); - mutex_unlock(&hive->hive_lock); - return 0; + r = 0; + goto skip_recovery; } /* @@ -4385,8 +4464,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, &need_full_reset); /*TODO Should we stop ?*/ if (r) { - DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ", - r, tmp_adev->ddev->unique); + dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", + r, adev_to_drm(tmp_adev)->unique); tmp_adev->asic_reset_res = r; } } @@ -4422,7 +4501,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, } if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) { - drm_helper_resume_force_mode(tmp_adev->ddev); + drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); } tmp_adev->asic_reset_res = 0; @@ -4446,9 +4525,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, amdgpu_device_unlock_adev(tmp_adev); } +skip_recovery: if (hive) { - mutex_unlock(&hive->reset_lock); + atomic_set(&hive->in_reset, 0); mutex_unlock(&hive->hive_lock); + amdgpu_put_xgmi_hive(hive); } if (r) @@ -4594,10 +4675,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) int amdgpu_device_baco_enter(struct drm_device *dev) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - if (!amdgpu_device_supports_baco(adev->ddev)) + if (!amdgpu_device_supports_baco(adev_to_drm(adev))) return -ENOTSUPP; if (ras && ras->supported) @@ -4608,11 +4689,11 @@ int amdgpu_device_baco_enter(struct drm_device *dev) int amdgpu_device_baco_exit(struct drm_device *dev) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); int ret = 0; - if (!amdgpu_device_supports_baco(adev->ddev)) + if (!amdgpu_device_supports_baco(adev_to_drm(adev))) return -ENOTSUPP; ret = amdgpu_dpm_baco_exit(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index a50ff2306504cf17c5ba1bd46c44f72958a67f4d..bfb95143ba5e8fc335a6f84901c814aa2241edd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -136,7 +136,7 @@ static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *bin uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, - adev->discovery_tmr_size, false); + adev->mman.discovery_tmr_size, false); return 0; } @@ -168,18 +168,18 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) uint16_t checksum; int r; - adev->discovery_tmr_size = DISCOVERY_TMR_SIZE; - adev->discovery_bin = kzalloc(adev->discovery_tmr_size, GFP_KERNEL); - if (!adev->discovery_bin) + adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE; + adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL); + if (!adev->mman.discovery_bin) return -ENOMEM; - r = amdgpu_discovery_read_binary(adev, adev->discovery_bin); + r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin); if (r) { DRM_ERROR("failed to read ip discovery binary\n"); goto out; } - bhdr = (struct binary_header *)adev->discovery_bin; + bhdr = (struct binary_header *)adev->mman.discovery_bin; if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) { DRM_ERROR("invalid ip discovery binary signature\n"); @@ -192,7 +192,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) size = bhdr->binary_size - offset; 
checksum = bhdr->binary_checksum; - if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset, + if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, size, checksum)) { DRM_ERROR("invalid ip discovery binary checksum\n"); r = -EINVAL; @@ -202,7 +202,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) info = &bhdr->table_list[IP_DISCOVERY]; offset = le16_to_cpu(info->offset); checksum = le16_to_cpu(info->checksum); - ihdr = (struct ip_discovery_header *)(adev->discovery_bin + offset); + ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + offset); if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { DRM_ERROR("invalid ip discovery data table signature\n"); @@ -210,7 +210,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset, + if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, ihdr->size, checksum)) { DRM_ERROR("invalid ip discovery data table checksum\n"); r = -EINVAL; @@ -220,9 +220,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) info = &bhdr->table_list[GC]; offset = le16_to_cpu(info->offset); checksum = le16_to_cpu(info->checksum); - ghdr = (struct gpu_info_header *)(adev->discovery_bin + offset); + ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset); - if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset, + if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, ghdr->size, checksum)) { DRM_ERROR("invalid gc data table checksum\n"); r = -EINVAL; @@ -232,16 +232,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) return 0; out: - kfree(adev->discovery_bin); - adev->discovery_bin = NULL; + kfree(adev->mman.discovery_bin); + adev->mman.discovery_bin = NULL; return r; } void amdgpu_discovery_fini(struct amdgpu_device *adev) { - kfree(adev->discovery_bin); - adev->discovery_bin = NULL; + kfree(adev->mman.discovery_bin); + adev->mman.discovery_bin = NULL; } int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) @@ -265,8 +265,8 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) return r; } - bhdr = (struct binary_header *)adev->discovery_bin; - ihdr = (struct ip_discovery_header *)(adev->discovery_bin + + bhdr = (struct binary_header *)adev->mman.discovery_bin; + ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); @@ -274,7 +274,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) for (i = 0; i < num_dies; i++) { die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); - dhdr = (struct die_header *)(adev->discovery_bin + die_offset); + dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); @@ -288,7 +288,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) le16_to_cpu(dhdr->die_id), num_ips); for (j = 0; j < num_ips; j++) { - ip = (struct ip *)(adev->discovery_bin + ip_offset); + ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); num_base_address = ip->num_base_address; DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n", @@ -337,24 +337,24 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, uint16_t num_ips; int i, j; - if (!adev->discovery_bin) { + if (!adev->mman.discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct 
binary_header *)adev->discovery_bin; - ihdr = (struct ip_discovery_header *)(adev->discovery_bin + + bhdr = (struct binary_header *)adev->mman.discovery_bin; + ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); for (i = 0; i < num_dies; i++) { die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); - dhdr = (struct die_header *)(adev->discovery_bin + die_offset); + dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); for (j = 0; j < num_ips; j++) { - ip = (struct ip *)(adev->discovery_bin + ip_offset); + ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); if (le16_to_cpu(ip->hw_id) == hw_id) { if (major) @@ -377,13 +377,13 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) struct binary_header *bhdr; struct gc_info_v1_0 *gc_info; - if (!adev->discovery_bin) { + if (!adev->mman.discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct binary_header *)adev->discovery_bin; - gc_info = (struct gc_info_v1_0 *)(adev->discovery_bin + + bhdr = (struct binary_header *)adev->mman.discovery_bin; + gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin + le16_to_cpu(bhdr->table_list[GC].offset)); adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index d761729651998684d8a5407067a98a1e5bce09ad..c81206e6096f4021d0ef08f5a90f6474e283329a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -93,7 +93,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work) * targeted by the flip */ if (amdgpu_crtc->enabled && - (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0, + (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0, &vpos, &hpos, NULL, NULL, &crtc->hwmode) & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == @@ -152,7 +152,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, struct drm_modeset_acquire_ctx *ctx) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_gem_object *obj; struct amdgpu_flip_work *work; @@ -292,7 +292,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, pm_runtime_mark_last_busy(dev->dev); - adev = dev->dev_private; + adev = drm_to_adev(dev); /* if we have active crtcs and we don't have a power ref, take the current one */ if (active && !adev->have_disp_power_ref) { @@ -619,51 +619,51 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) int sz; adev->mode_info.coherent_mode_property = - drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1); + drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1); if (!adev->mode_info.coherent_mode_property) return -ENOMEM; adev->mode_info.load_detect_property = - drm_property_create_range(adev->ddev, 0, "load detection", 0, 1); + drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1); if (!adev->mode_info.load_detect_property) return -ENOMEM; - drm_mode_create_scaling_mode_property(adev->ddev); + drm_mode_create_scaling_mode_property(adev_to_drm(adev)); sz = ARRAY_SIZE(amdgpu_underscan_enum_list); adev->mode_info.underscan_property = 
- drm_property_create_enum(adev->ddev, 0, - "underscan", - amdgpu_underscan_enum_list, sz); + drm_property_create_enum(adev_to_drm(adev), 0, + "underscan", + amdgpu_underscan_enum_list, sz); adev->mode_info.underscan_hborder_property = - drm_property_create_range(adev->ddev, 0, - "underscan hborder", 0, 128); + drm_property_create_range(adev_to_drm(adev), 0, + "underscan hborder", 0, 128); if (!adev->mode_info.underscan_hborder_property) return -ENOMEM; adev->mode_info.underscan_vborder_property = - drm_property_create_range(adev->ddev, 0, - "underscan vborder", 0, 128); + drm_property_create_range(adev_to_drm(adev), 0, + "underscan vborder", 0, 128); if (!adev->mode_info.underscan_vborder_property) return -ENOMEM; sz = ARRAY_SIZE(amdgpu_audio_enum_list); adev->mode_info.audio_property = - drm_property_create_enum(adev->ddev, 0, + drm_property_create_enum(adev_to_drm(adev), 0, "audio", amdgpu_audio_enum_list, sz); sz = ARRAY_SIZE(amdgpu_dither_enum_list); adev->mode_info.dither_property = - drm_property_create_enum(adev->ddev, 0, + drm_property_create_enum(adev_to_drm(adev), 0, "dither", amdgpu_dither_enum_list, sz); if (amdgpu_device_has_dc_support(adev)) { adev->mode_info.abm_level_property = - drm_property_create_range(adev->ddev, 0, - "abm level", 0, 4); + drm_property_create_range(adev_to_drm(adev), 0, + "abm level", 0, 4); if (!adev->mode_info.abm_level_property) return -ENOMEM; } @@ -813,7 +813,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev, int vbl_start, vbl_end, vtotal, ret = 0; bool in_vbl = true; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index d7050ab95946c842f9dfe7101090fe280df9c9ec..957934926b24529341b3b0a4c13468d7e012afee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -35,6 +35,7 @@ #include "amdgpu_display.h" #include "amdgpu_gem.h" #include "amdgpu_dma_buf.h" +#include "amdgpu_xgmi.h" #include #include #include @@ -455,7 +456,7 @@ static struct drm_gem_object * amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf) { struct dma_resv *resv = dma_buf->resv; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_bo *bo; struct amdgpu_bo_param bp; int ret; @@ -596,3 +597,36 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, obj->import_attach = attach; return obj; } + +/** + * amdgpu_dmabuf_is_xgmi_accessible - Check if xgmi available for P2P transfer + * + * @adev: amdgpu_device pointer of the importer + * @bo: amdgpu buffer object + * + * Returns: + * True if dmabuf accessible over xgmi, false otherwise. 
+ */ +bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, + struct amdgpu_bo *bo) +{ + struct drm_gem_object *obj = &bo->tbo.base; + struct drm_gem_object *gobj; + + if (obj->import_attach) { + struct dma_buf *dma_buf = obj->import_attach->dmabuf; + + if (dma_buf->ops != &amdgpu_dmabuf_ops) + /* No XGMI with non AMD GPUs */ + return false; + + gobj = dma_buf->priv; + bo = gem_to_amdgpu_bo(gobj); + } + + if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) && + (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) + return true; + + return false; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h index ec447a7b6b28394bcfe1184fe2ab45f0e5610419..2c5c84a06bb93f09034f0e790bcdba83c73acaa0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h @@ -29,6 +29,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj, int flags); struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); +bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, + struct amdgpu_bo *bo); void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 81a79760ca61e1bcdab8f278eb3b80896d72ce94..6edde2b9e4020c331f04c6786ff5d9ba14de9dec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "amdgpu_drv.h" #include @@ -88,9 +89,10 @@ * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC * - 3.39.0 - DMABUF implicit sync does a full pipeline sync + * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 39 +#define KMS_DRIVER_MINOR 40 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; @@ -150,12 +152,14 @@ int amdgpu_noretry; int amdgpu_force_asic_type = -1; int amdgpu_tmz = 0; int amdgpu_reset_method = -1; /* auto */ +int amdgpu_num_kcq = -1; struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), }; int amdgpu_ras_enable = -1; uint amdgpu_ras_mask = 0xffffffff; +int amdgpu_bad_page_threshold = -1; /** * DOC: vramlimit (int) @@ -676,11 +680,14 @@ MODULE_PARM_DESC(debug_largebar, * Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT * table to get information about AMD APUs. This option can serve as a workaround on * systems with a broken CRAT table. 
+ * + * Default is auto (according to asic type, iommu_v2, and crat table, to decide + * whether to use CRAT) */ int ignore_crat; module_param(ignore_crat, int, 0444); MODULE_PARM_DESC(ignore_crat, - "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)"); + "Ignore CRAT table during KFD initialization (0 = auto (default), 1 = ignore CRAT)"); /** * DOC: halt_if_hws_hang (int) @@ -715,6 +722,15 @@ MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 bool debug_evictions; module_param(debug_evictions, bool, 0644); MODULE_PARM_DESC(debug_evictions, "enable eviction debug messages (false = default)"); + +/** + * DOC: no_system_mem_limit (bool) + * Disable the system memory limit, to support memory shared by multiple processes + */ +bool no_system_mem_limit; +module_param(no_system_mem_limit, bool, 0644); +MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = default)"); + #endif /** @@ -765,6 +781,19 @@ module_param_named(tmz, amdgpu_tmz, int, 0444); MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)"); module_param_named(reset_method, amdgpu_reset_method, int, 0444); +/** + * DOC: bad_page_threshold (int) + * Bad page threshold specifies the threshold of faulty pages detected by + * RAS ECC; the GPU may enter a bad status once the total number of faulty + * pages exceeds this value, which is left for the user's further check. + */ +MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold (-1 = auto (default), 0 = disable bad page retirement)"); +module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444); + +MODULE_PARM_DESC(num_kcq, "number of kernel compute queues to set up (capped at 8 if set to more than 8 or less than 0; only affects gfx8 and newer)"); +module_param_named(num_kcq, amdgpu_num_kcq, int, 0444); + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, @@ -1057,7 +1086,7 @@ static struct drm_driver kms_driver; static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct drm_device *dev; + struct drm_device *ddev; struct amdgpu_device *adev; unsigned long flags = ent->driver_data; int ret, retry = 0; @@ -1113,36 +1142,44 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, if (ret) return ret; - dev = drm_dev_alloc(&kms_driver, &pdev->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return -ENOMEM; + + adev->dev = &pdev->dev; + adev->pdev = pdev; + ddev = adev_to_drm(adev); + ret = drm_dev_init(ddev, &kms_driver, &pdev->dev); + if (ret) + goto err_free; + + drmm_add_final_kfree(ddev, adev); if (!supports_atomic) - dev->driver_features &= ~DRIVER_ATOMIC; + ddev->driver_features &= ~DRIVER_ATOMIC; ret = pci_enable_device(pdev); if (ret) goto err_free; - dev->pdev = pdev; - - pci_set_drvdata(pdev, dev); + ddev->pdev = pdev; + pci_set_drvdata(pdev, ddev); - ret = amdgpu_driver_load_kms(dev, ent->driver_data); + ret = amdgpu_driver_load_kms(adev, ent->driver_data); if (ret) goto err_pci; retry_init: - ret = drm_dev_register(dev, ent->driver_data); + ret = drm_dev_register(ddev, ent->driver_data); if (ret == -EAGAIN && ++retry <= 3) { DRM_INFO("retry init %d\n", retry); /* Don't request EX mode too frequently which is attacking */ msleep(5000); goto retry_init; - } else if (ret) + } else if (ret) { goto err_pci; + } - adev = 
dev->dev_private; ret = amdgpu_debugfs_init(adev); if (ret) DRM_ERROR("Creating debugfs files failed (%d).\n", ret); @@ -1152,7 +1189,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, err_pci: pci_disable_device(pdev); err_free: - drm_dev_put(dev); + drm_dev_put(ddev); return ret; } @@ -1176,7 +1213,7 @@ static void amdgpu_pci_shutdown(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (amdgpu_ras_intr_triggered()) return; @@ -1209,7 +1246,7 @@ static int amdgpu_pmops_resume(struct device *dev) static int amdgpu_pmops_freeze(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(drm_dev); int r; adev->in_hibernate = true; @@ -1245,7 +1282,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - struct amdgpu_device *adev = drm_dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(drm_dev); int ret, i; if (!adev->runpm) { @@ -1296,7 +1333,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - struct amdgpu_device *adev = drm_dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(drm_dev); int ret; if (!adev->runpm) @@ -1332,7 +1369,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) static int amdgpu_pmops_runtime_idle(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(drm_dev); /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ int ret = 1; @@ -1500,8 +1537,6 @@ static struct pci_driver amdgpu_kms_pci_driver = { .driver.pm = &amdgpu_pm_ops, }; - - static int __init amdgpu_init(void) { int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c index 61fcf247a638926d986237d95244fbc5800f1fc0..af4ef84e27a7738a5a1226a472a4af9997efbe16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c @@ -35,7 +35,7 @@ void amdgpu_link_encoder_connector(struct drm_device *dev) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_connector *connector; struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index db731f573f98202192b774298cbab04f2d834469..e2c2eb45a7934fadaf812b3b7f65c9cdfa3bbbc4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -135,7 +135,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | AMDGPU_GEM_CREATE_VRAM_CLEARED; - info = drm_get_format_info(adev->ddev, mode_cmd); + info = drm_get_format_info(adev_to_drm(adev), mode_cmd); cpp = info->cpp[0]; /* need to align pitch with crtc limits */ @@ -231,7 +231,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, goto out; } - ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb, + ret = amdgpu_display_framebuffer_init(adev_to_drm(adev), &rfbdev->rfb, &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initialize framebuffer %d\n", ret); 
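
The dev->dev_private and adev->ddev conversions in these hunks rely on struct drm_device being embedded directly in struct amdgpu_device, as set up in the amdgpu_pci_probe() hunk above (kzalloc() of adev, drm_dev_init() on the embedded device, then drmm_add_final_kfree()). A minimal sketch of the two accessors the converted call sites use, assuming the embedded field is named ddev (the field name is an assumption for illustration; it is not shown in this section):

	/*
	 * Sketch only: with drm_device embedded in amdgpu_device, both
	 * conversions are plain pointer arithmetic rather than a lookup
	 * through drm_device::dev_private.
	 */
	static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
	{
		return &adev->ddev;	/* assumed embedded field name */
	}

	static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
	{
		return container_of(ddev, struct amdgpu_device, ddev);
	}

One consequence of the container_of() form is that drm_to_adev() cannot return NULL for a live drm_device, which is why the dev->dev_private NULL check in amdgpu_device_suspend() is dropped above, and why amdgpu_driver_unload_kms() no longer kfree()s the device by hand: the kzalloc()'d amdgpu_device is released by the drmm_add_final_kfree() hook once the drm_device refcount drops.
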
@@ -254,7 +254,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, drm_fb_helper_fill_info(info, &rfbdev->helper, sizes); /* setup aperture base/size for vesafb takeover */ - info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; + info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base; info->apertures->ranges[0].size = adev->gmc.aper_size; /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ @@ -270,7 +270,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, DRM_INFO("fb depth is %d\n", fb->format->depth); DRM_INFO(" pitch is %d\n", fb->pitches[0]); - vga_switcheroo_client_fb_set(adev->ddev->pdev, info); + vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info); return 0; out: @@ -318,7 +318,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev) return 0; /* don't init fbdev if there are no connectors */ - if (list_empty(&adev->ddev->mode_config.connector_list)) + if (list_empty(&adev_to_drm(adev)->mode_config.connector_list)) return 0; /* select 8 bpp console on low vram cards */ @@ -332,10 +332,10 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev) rfbdev->adev = adev; adev->mode_info.rfbdev = rfbdev; - drm_fb_helper_prepare(adev->ddev, &rfbdev->helper, - &amdgpu_fb_helper_funcs); + drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper, + &amdgpu_fb_helper_funcs); - ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper); + ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper); if (ret) { kfree(rfbdev); return ret; @@ -343,7 +343,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev) /* disable all the possible outputs/crtcs before entering KMS mode */ if (!amdgpu_device_has_dc_support(adev)) - drm_helper_disable_unused_functions(adev->ddev); + drm_helper_disable_unused_functions(adev_to_drm(adev)); drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); return 0; @@ -354,7 +354,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev) if (!adev->mode_info.rfbdev) return; - amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev); + amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev); kfree(adev->mode_info.rfbdev); adev->mode_info.rfbdev = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 58d4c219178a29593592197beed39abfa8bfef48..fe2d495d08ab01d17bdb2503a2ab011d1818dfed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -155,7 +155,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, seq); amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, flags | AMDGPU_FENCE_FLAG_INT); - pm_runtime_get_noresume(adev->ddev->dev); + pm_runtime_get_noresume(adev_to_drm(adev)->dev); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; if (unlikely(rcu_dereference_protected(*ptr, 1))) { struct dma_fence *old; @@ -284,8 +284,8 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) BUG(); dma_fence_put(fence); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); } while (last_seq != seq); return true; @@ -700,7 +700,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int i; for (i = 0; 
i < AMDGPU_MAX_RINGS; ++i) { @@ -749,7 +749,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int r; r = pm_runtime_get_sync(dev->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 6b1eb9d045ae6ce72f07d1b6a4d7ea16e8008f72..aa7f230c71bf5aa48328accb28fa2d417282183e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -93,7 +93,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, void amdgpu_gem_force_release(struct amdgpu_device *adev) { - struct drm_device *ddev = adev->ddev; + struct drm_device *ddev = adev_to_drm(adev); struct drm_file *file; mutex_lock(&ddev->filelist_mutex); @@ -217,7 +217,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; union drm_amdgpu_gem_create *args = data; @@ -298,7 +298,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct ttm_operation_ctx ctx = { true, false }; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_amdgpu_gem_userptr *args = data; struct drm_gem_object *gobj; struct amdgpu_bo *bo; @@ -587,7 +587,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_amdgpu_gem_va *args = data; struct drm_gem_object *gobj; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; struct amdgpu_bo *abo; struct amdgpu_bo_va *bo_va; @@ -711,7 +711,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_amdgpu_gem_op *args = data; struct drm_gem_object *gobj; struct amdgpu_vm_bo_base *base; @@ -788,7 +788,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_gem_object *gobj; uint32_t handle; u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 78d37f92c7be53eb82f006f4d0da95519d3dcecf..d6981425ec5101eac8841fa4fe6c2c3fbbdae0f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -202,40 +202,29 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) { - int i, queue, pipe, mec; + int i, queue, pipe; bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev); - - /* policy for amdgpu compute queue ownership */ - for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { - queue = i % adev->gfx.mec.num_queue_per_pipe; - pipe = (i / adev->gfx.mec.num_queue_per_pipe) - % adev->gfx.mec.num_pipe_per_mec; - mec = (i / 
adev->gfx.mec.num_queue_per_pipe) - / adev->gfx.mec.num_pipe_per_mec; - - /* we've run out of HW */ - if (mec >= adev->gfx.mec.num_mec) - break; - - if (multipipe_policy) { - /* policy: amdgpu owns the first two queues of the first MEC */ - if (mec == 0 && queue < 2) - set_bit(i, adev->gfx.mec.queue_bitmap); - } else { - /* policy: amdgpu owns all queues in the first pipe */ - if (mec == 0 && pipe == 0) - set_bit(i, adev->gfx.mec.queue_bitmap); + int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * + adev->gfx.mec.num_queue_per_pipe, + adev->gfx.num_compute_rings); + + if (multipipe_policy) { + /* policy: make queues evenly cross all pipes on MEC1 only */ + for (i = 0; i < max_queues_per_mec; i++) { + pipe = i % adev->gfx.mec.num_pipe_per_mec; + queue = (i / adev->gfx.mec.num_pipe_per_mec) % + adev->gfx.mec.num_queue_per_pipe; + + set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue, + adev->gfx.mec.queue_bitmap); } + } else { + /* policy: amdgpu owns all queues in the given pipe */ + for (i = 0; i < max_queues_per_mec; ++i) + set_bit(i, adev->gfx.mec.queue_bitmap); } - /* update the number of active compute rings */ - adev->gfx.num_compute_rings = - bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); - - /* If you hit this case and edited the policy, you probably just - * need to increase AMDGPU_MAX_COMPUTE_RINGS */ - if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS)) - adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; + dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); } void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) @@ -571,8 +560,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) { schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE); } else if (!enable && adev->gfx.gfx_off_state) { - if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) + if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) { adev->gfx.gfx_off_state = false; + + if (adev->gfx.funcs->init_spm_golden) { + dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n"); + amdgpu_gfx_init_spm_golden(adev); + } + } } mutex_unlock(&adev->gfx.gfx_off_mutex); @@ -724,7 +719,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) * * also don't wait anymore for IRQ context * */ - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt())) goto failed_kiq_read; might_sleep(); @@ -748,7 +743,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) failed_kiq_read: if (reg_val_offs) amdgpu_device_wb_free(adev, reg_val_offs); - pr_err("failed to read reg:%x\n", reg); + dev_err(adev->dev, "failed to read reg:%x\n", reg); return ~0; } @@ -782,7 +777,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) * * also don't wait anymore for IRQ context * */ - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt())) goto failed_kiq_write; might_sleep(); @@ -801,5 +796,5 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) amdgpu_ring_undo(ring); spin_unlock_irqrestore(&kiq->ring_lock, flags); failed_kiq_write: - pr_err("failed to write reg:%x\n", reg); + dev_err(adev->dev, "failed to write reg:%x\n", reg); } diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 1e7a2b0997c56a58b6c9426db48ccc17f4718433..a611e78dd4bac536d4bfae5ca6f9feb88c490baa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -216,6 +216,7 @@ struct amdgpu_gfx_funcs { int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if); int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status); void (*reset_ras_error_count) (struct amdgpu_device *adev); + void (*init_spm_golden)(struct amdgpu_device *adev); }; struct sq_work { @@ -324,6 +325,7 @@ struct amdgpu_gfx { #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid)) +#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev)) /** * amdgpu_gfx_create_bitmask - create a bitmask diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 34cbd6f6a56b7864f95eb1ad62cf1112b3e51d1e..213ef090bb0e8bd26e63aabbd72e9a18a654e1b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -27,6 +27,7 @@ #include #include "amdgpu.h" +#include "amdgpu_gmc.h" #include "amdgpu_ras.h" #include "amdgpu_xgmi.h" @@ -411,3 +412,64 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) break; } } + +void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, + bool enable) +{ + struct amdgpu_vmhub *hub; + u32 tmp, reg, i; + + hub = &adev->vmhub[hub_type]; + for (i = 0; i < 16; i++) { + reg = hub->vm_context0_cntl + hub->ctx_distance * i; + + tmp = RREG32(reg); + if (enable) + tmp |= hub->vm_cntx_cntl_vm_fault; + else + tmp &= ~hub->vm_cntx_cntl_vm_fault; + + WREG32(reg, tmp); + } +} + +void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev) +{ + unsigned size; + + /* + * TODO: + * Currently there is a bug where some memory client outside + * of the driver writes to first 8M of VRAM on S3 resume, + * this overrides GART which by default gets placed in first 8M and + * causes VM_FAULTS once GTT is accessed. + * Keep the stolen memory reservation until this is solved. 
+ */ + switch (adev->asic_type) { + case CHIP_VEGA10: + case CHIP_RAVEN: + case CHIP_RENOIR: + adev->mman.keep_stolen_vga_memory = true; + break; + default: + adev->mman.keep_stolen_vga_memory = false; + break; + } + + if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) + size = 0; + else + size = amdgpu_gmc_get_vbios_fb_size(adev); + + /* set to 0 if the pre-OS buffer uses up most of vram */ + if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) + size = 0; + + if (size > AMDGPU_VBIOS_VGA_ALLOCATION) { + adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION; + adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size; + } else { + adev->mman.stolen_vga_size = size; + adev->mman.stolen_extended_size = 0; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index acdb61cfa24cd4ab2150cceec93cc964bacfee42..d61bbde4c7d2cbba288099535aba6b394b58fa12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -74,6 +74,12 @@ struct amdgpu_gmc_fault { /* * VMHUB structures, functions & helpers */ +struct amdgpu_vmhub_funcs { + void (*print_l2_protection_fault_status)(struct amdgpu_device *adev, + uint32_t status); + uint32_t (*get_invalidate_req)(unsigned int vmid, uint32_t flush_type); +}; + struct amdgpu_vmhub { uint32_t ctx0_ptb_addr_lo32; uint32_t ctx0_ptb_addr_hi32; @@ -92,6 +98,10 @@ struct amdgpu_vmhub { uint32_t ctx_addr_distance; /* include LO32/HI32 */ uint32_t eng_distance; uint32_t eng_addr_distance; /* include LO32/HI32 */ + + uint32_t vm_cntx_cntl_vm_fault; + + const struct amdgpu_vmhub_funcs *vmhub_funcs; }; /* @@ -121,6 +131,8 @@ struct amdgpu_gmc_funcs { void (*get_vm_pte)(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping, uint64_t *flags); + /* get the amount of memory used by the vbios for pre-OS console */ + unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev); }; struct amdgpu_xgmi { @@ -203,7 +215,6 @@ struct amdgpu_gmc { uint8_t vram_vendor; uint32_t srbm_soft_reset; bool prt_warning; - uint64_t stolen_size; uint32_t sdpif_register; /* apertures */ u64 shared_aperture_start; @@ -239,6 +250,7 @@ struct amdgpu_gmc { #define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags)) #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) #define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags)) +#define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev)) /** * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR @@ -289,4 +301,10 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev); extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev); +extern void +amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, + bool enable); + +void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 697bc2c6fdb29954d92c3a4d9e7d64d77d3385b2..2b1b7c136343d1e80710f24d7574d1d0510b81e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -46,8 +46,9 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = 
ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + return snprintf(buf, PAGE_SIZE, "%llu\n", man->size * PAGE_SIZE); } @@ -64,8 +65,9 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + return snprintf(buf, PAGE_SIZE, "%llu\n", amdgpu_gtt_mgr_usage(man)); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 70dbe343f51df5c4de78e9e3d68b37d55a59865d..47cad23a6b9e2b6b81223a55df741e17da7cfbca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -40,7 +40,7 @@ static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap) { struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); - struct amdgpu_device *adev = i2c->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(i2c->dev); struct amdgpu_i2c_bus_rec *rec = &i2c->rec; uint32_t temp; @@ -82,7 +82,7 @@ static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap) static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap) { struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); - struct amdgpu_device *adev = i2c->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(i2c->dev); struct amdgpu_i2c_bus_rec *rec = &i2c->rec; uint32_t temp; @@ -101,7 +101,7 @@ static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap) static int amdgpu_i2c_get_clock(void *i2c_priv) { struct amdgpu_i2c_chan *i2c = i2c_priv; - struct amdgpu_device *adev = i2c->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(i2c->dev); struct amdgpu_i2c_bus_rec *rec = &i2c->rec; uint32_t val; @@ -116,7 +116,7 @@ static int amdgpu_i2c_get_clock(void *i2c_priv) static int amdgpu_i2c_get_data(void *i2c_priv) { struct amdgpu_i2c_chan *i2c = i2c_priv; - struct amdgpu_device *adev = i2c->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(i2c->dev); struct amdgpu_i2c_bus_rec *rec = &i2c->rec; uint32_t val; @@ -130,7 +130,7 @@ static int amdgpu_i2c_get_data(void *i2c_priv) static void amdgpu_i2c_set_clock(void *i2c_priv, int clock) { struct amdgpu_i2c_chan *i2c = i2c_priv; - struct amdgpu_device *adev = i2c->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(i2c->dev); struct amdgpu_i2c_bus_rec *rec = &i2c->rec; uint32_t val; @@ -143,7 +143,7 @@ static void amdgpu_i2c_set_clock(void *i2c_priv, int clock) static void amdgpu_i2c_set_data(void *i2c_priv, int data) { struct amdgpu_i2c_chan *i2c = i2c_priv; - struct amdgpu_device *adev = i2c->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(i2c->dev); struct amdgpu_i2c_bus_rec *rec = &i2c->rec; uint32_t val; @@ -253,7 +253,7 @@ void amdgpu_i2c_add(struct amdgpu_device *adev, const struct amdgpu_i2c_bus_rec *rec, const char *name) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); int i; for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index dcd4921705987c44b798bfa92c0f037b968a4dd4..2f53fa0ae9a62535a5aeeca9eeb6614f00a0f91a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -445,7 +445,7 @@ static int amdgpu_debugfs_sa_info(struct 
seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); seq_printf(m, "--------------------- DELAYED --------------------- \n"); amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED], diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 0cc4c67f95f721f7671d5e46824b0b0d5e70e7fa..300ac73b47382db380d766581e5ef75f8451ca52 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -85,7 +85,7 @@ static void amdgpu_hotplug_work_func(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, hotplug_work); - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; struct drm_connector_list_iter iter; @@ -151,7 +151,7 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev) irqreturn_t amdgpu_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); irqreturn_t ret; ret = amdgpu_ih_process(adev, &adev->irq.ih); @@ -268,9 +268,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev) if (!adev->enable_virtual_display) /* Disable vblank IRQs aggressively for power-saving */ /* XXX: can this be enabled for DC? */ - adev->ddev->vblank_disable_immediate = true; + adev_to_drm(adev)->vblank_disable_immediate = true; - r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); + r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc); if (r) return r; @@ -284,14 +284,14 @@ int amdgpu_irq_init(struct amdgpu_device *adev) adev->irq.installed = true; /* Use vector 0 for MSI-X */ - r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0)); + r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0)); if (r) { adev->irq.installed = false; if (!amdgpu_device_has_dc_support(adev)) flush_work(&adev->hotplug_work); return r; } - adev->ddev->max_vblank_count = 0x00ffffff; + adev_to_drm(adev)->max_vblank_count = 0x00ffffff; DRM_DEBUG("amdgpu: irq initialized.\n"); return 0; @@ -311,7 +311,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) unsigned i, j; if (adev->irq.installed) { - drm_irq_uninstall(adev->ddev); + drm_irq_uninstall(adev_to_drm(adev)); adev->irq.installed = false; if (adev->irq.msi_enabled) pci_free_irq_vectors(adev->pdev); @@ -522,7 +522,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type) { - if (!adev->ddev->irq_enabled) + if (!adev_to_drm(adev)->irq_enabled) return -ENOENT; if (type >= src->num_types) @@ -552,7 +552,7 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type) { - if (!adev->ddev->irq_enabled) + if (!adev_to_drm(adev)->irq_enabled) return -ENOENT; if (type >= src->num_types) @@ -583,7 +583,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type) { - if (!adev->ddev->irq_enabled) + if (!adev_to_drm(adev)->irq_enabled) return false; if (type >= src->num_types) diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 937029ad5271ab6a89ef60014f71e6b88b9755c8..dcfe8a3b03ffb9efdbeeba5a560bbc37cd777320 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -251,7 +251,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) int i; /* Signal all jobs not yet scheduled */ - for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { struct drm_sched_rq *rq = &sched->sched_rq[i]; if (!rq) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 55ff071217a94798b925f5f2da3e80cde23a0318..456a4a93b337a487c6b710d857918f7a78dddc31 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -78,7 +78,7 @@ void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev) */ void amdgpu_driver_unload_kms(struct drm_device *dev) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (adev == NULL) return; @@ -86,7 +86,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) amdgpu_unregister_gpu_instance(adev); if (adev->rmmio == NULL) - goto done_free; + return; if (adev->runpm) { pm_runtime_get_sync(dev->dev); @@ -94,12 +94,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) } amdgpu_acpi_fini(adev); - amdgpu_device_fini(adev); - -done_free: - kfree(adev); - dev->dev_private = NULL; } void amdgpu_register_gpu_instance(struct amdgpu_device *adev) @@ -130,22 +125,18 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev) /** * amdgpu_driver_load_kms - Main load function for KMS. * - * @dev: drm dev pointer + * @adev: pointer to struct amdgpu_device * @flags: device flags * * This is the main load function for KMS (all asics). * Returns 0 on success, error on failure. 
*/ -int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) +int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) { - struct amdgpu_device *adev; + struct drm_device *dev; int r, acpi_status; - adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL); - if (adev == NULL) { - return -ENOMEM; - } - dev->dev_private = (void *)adev; + dev = adev_to_drm(adev); if (amdgpu_has_atpx() && (amdgpu_is_atpx_hybrid() || @@ -160,7 +151,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) * properly initialize the GPU MC controller and permit * VRAM allocation */ - r = amdgpu_device_init(adev, dev, dev->pdev, flags); + r = amdgpu_device_init(adev, flags); if (r) { dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); goto out; @@ -179,6 +170,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) case CHIP_VEGA20: case CHIP_ARCTURUS: case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: /* enable runpm if runpm=1 */ if (amdgpu_runtime_pm > 0) adev->runpm = true; @@ -479,7 +471,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, */ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_amdgpu_info *info = data; struct amdgpu_mode_info *minfo = &adev->mode_info; void __user *out = (void __user *)(uintptr_t)info->return_pointer; @@ -681,8 +673,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file * in the bitfields */ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) se_num = 0xffffffff; + else if (se_num >= AMDGPU_GFX_MAX_SE) + return -EINVAL; if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; + else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) + return -EINVAL; if (info->read_mmr_reg.count > 128) return -EINVAL; @@ -740,6 +736,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION; + if (amdgpu_is_tmz(adev)) + dev_info.ids_flags |= AMDGPU_IDS_FLAGS_TMZ; vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; vm_size -= AMDGPU_VA_RESERVED_SIZE; @@ -993,7 +991,7 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev) */ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv; int r, pasid; @@ -1078,7 +1076,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = file_priv->driver_priv; struct amdgpu_bo_list *list; struct amdgpu_bo *pd; @@ -1143,7 +1141,7 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; unsigned int pipe = crtc->index; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int vpos, hpos, stat; u32 count; @@ -1211,7 +1209,7 @@ int amdgpu_enable_vblank_kms(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; unsigned int pipe = crtc->index; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int idx = 
amdgpu_display_crtc_idx_to_irq_type(adev, pipe); return amdgpu_irq_get(adev, &adev->crtc_irq, idx); @@ -1228,7 +1226,7 @@ void amdgpu_disable_vblank_kms(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; unsigned int pipe = crtc->index; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe); amdgpu_irq_put(adev, &adev->crtc_irq, idx); @@ -1264,7 +1262,7 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_amdgpu_info_firmware fw_info; struct drm_amdgpu_query_fw query_fw; struct atom_context *ctx = adev->mode_info.atom_context; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index e89fb35fec713f62f5156f27541b5b7911130828..0c43d7fe893cce32896204fa701272b073e4b08a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -27,6 +27,19 @@ struct amdgpu_mmhub_funcs { void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); void (*reset_ras_error_count)(struct amdgpu_device *adev); + u64 (*get_fb_location)(struct amdgpu_device *adev); + void (*init)(struct amdgpu_device *adev); + int (*gart_enable)(struct amdgpu_device *adev); + void (*set_fault_enable_default)(struct amdgpu_device *adev, + bool value); + void (*gart_disable)(struct amdgpu_device *adev); + int (*set_clockgating)(struct amdgpu_device *adev, + enum amd_clockgating_state state); + void (*get_clockgating)(struct amdgpu_device *adev, u32 *flags); + void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); + void (*update_power_gating)(struct amdgpu_device *adev, + bool enable); }; struct amdgpu_mmhub { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index eaff4c4506ebd8216a803735d487f0ecd5321541..ac043baac05d6d234ee726794176cf624ccccb5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -380,6 +380,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, if (r) return r; + if ((*bo_ptr) == NULL) + return 0; + /* * Remove the original mem node and create a new one at the request * position. @@ -558,7 +561,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; - drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size); + drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size); INIT_LIST_HEAD(&bo->shadow_list); bo->vm_bo = NULL; bo->preferred_domains = bp->preferred_domain ? 
bp->preferred_domain : @@ -1305,7 +1308,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, } /** - * amdgpu_bo_move_notify - notification about a BO being released + * amdgpu_bo_release_notify - notification about a BO being released * @bo: pointer to a buffer object * * Wipes VRAM buffers whose contents should not be leaked before the diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c index 1311d6aec5d4b3c17e5b391659c02f1fd762d41b..69af462db34dfd31933faecb738ec647cb0075a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c @@ -226,7 +226,7 @@ static int init_pmu_by_type(struct amdgpu_device *adev, pmu_entry->pmu.attr_groups = attr_groups; pmu_entry->pmu_perf_type = pmu_perf_type; snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", - pmu_file_prefix, adev->ddev->primary->index); + pmu_file_prefix, adev_to_drm(adev)->primary->index); ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 7fe564275457f661edcdf148f4430b4666080e55..d6c38e24f130dde8de5feedd5a13b73cb8e5dbdb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -522,8 +522,7 @@ static int psp_asd_load(struct psp_context *psp) * add workaround to bypass it for sriov now. * TODO: add version check to make it common */ - if (amdgpu_sriov_vf(psp->adev) || - (psp->adev->asic_type == CHIP_NAVY_FLOUNDER)) + if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw) return 0; cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); @@ -1430,6 +1429,168 @@ static int psp_dtm_terminate(struct psp_context *psp) } // DTM end +// RAP start +static int psp_rap_init_shared_buf(struct psp_context *psp) +{ + int ret; + + /* + * Allocate 16k memory aligned to 4k from Frame Buffer (local + * physical) for rap ta <-> Driver + */ + ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAP_SHARED_MEM_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &psp->rap_context.rap_shared_bo, + &psp->rap_context.rap_shared_mc_addr, + &psp->rap_context.rap_shared_buf); + + return ret; +} + +static int psp_rap_load(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + memcpy(psp->fw_pri_buf, psp->ta_rap_start_addr, psp->ta_rap_ucode_size); + + psp_prep_ta_load_cmd_buf(cmd, + psp->fw_pri_mc_addr, + psp->ta_rap_ucode_size, + psp->rap_context.rap_shared_mc_addr, + PSP_RAP_SHARED_MEM_SIZE); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + if (!ret) { + psp->rap_context.rap_initialized = true; + psp->rap_context.session_id = cmd->resp.session_id; + mutex_init(&psp->rap_context.mutex); + } + + kfree(cmd); + + return ret; +} + +static int psp_rap_unload(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + +static int psp_rap_initialize(struct psp_context *psp) +{ + int ret; + + /* + * TODO: bypass the initialize in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->adev->psp.ta_rap_ucode_size || + !psp->adev->psp.ta_rap_start_addr) { + dev_info(psp->adev->dev, "RAP: 
optional rap ta ucode is not available\n"); + return 0; + } + + if (!psp->rap_context.rap_initialized) { + ret = psp_rap_init_shared_buf(psp); + if (ret) + return ret; + } + + ret = psp_rap_load(psp); + if (ret) + return ret; + + ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE); + if (ret != TA_RAP_STATUS__SUCCESS) { + psp_rap_unload(psp); + + amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo, + &psp->rap_context.rap_shared_mc_addr, + &psp->rap_context.rap_shared_buf); + + psp->rap_context.rap_initialized = false; + + dev_warn(psp->adev->dev, "RAP TA initialize fail.\n"); + return -EINVAL; + } + + return 0; +} + +static int psp_rap_terminate(struct psp_context *psp) +{ + int ret; + + if (!psp->rap_context.rap_initialized) + return 0; + + ret = psp_rap_unload(psp); + + psp->rap_context.rap_initialized = false; + + /* free rap shared memory */ + amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo, + &psp->rap_context.rap_shared_mc_addr, + &psp->rap_context.rap_shared_buf); + + return ret; +} + +int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +{ + struct ta_rap_shared_memory *rap_cmd; + int ret; + + if (!psp->rap_context.rap_initialized) + return -EINVAL; + + if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && + ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) + return -EINVAL; + + mutex_lock(&psp->rap_context.mutex); + + rap_cmd = (struct ta_rap_shared_memory *) + psp->rap_context.rap_shared_buf; + memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); + + rap_cmd->cmd_id = ta_cmd_id; + rap_cmd->validation_method_id = METHOD_A; + + ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id); + if (ret) { + mutex_unlock(&psp->rap_context.mutex); + return ret; + } + + mutex_unlock(&psp->rap_context.mutex); + + return rap_cmd->rap_status; +} +// RAP end + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -1707,7 +1868,7 @@ static int psp_load_smu_fw(struct psp_context *psp) return 0; - if (adev->in_gpu_reset && ras && ras->supported) { + if (amdgpu_in_reset(adev) && ras && ras->supported) { ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); if (ret) { DRM_WARN("Failed to set MP1 state prepare for reload\n"); @@ -1822,7 +1983,7 @@ static int psp_load_fw(struct amdgpu_device *adev) int ret; struct psp_context *psp = &adev->psp; - if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) { + if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */ goto skip_memalloc; } @@ -1892,6 +2053,11 @@ static int psp_load_fw(struct amdgpu_device *adev) if (ret) dev_err(psp->adev->dev, "DTM: Failed to initialize DTM\n"); + + ret = psp_rap_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "RAP: Failed to initialize RAP\n"); } return 0; @@ -1942,6 +2108,7 @@ static int psp_hw_fini(void *handle) if (psp->adev->psp.ta_fw) { psp_ras_terminate(psp); + psp_rap_terminate(psp); psp_dtm_terminate(psp); psp_hdcp_terminate(psp); } @@ -2000,6 +2167,11 @@ static int psp_suspend(void *handle) DRM_ERROR("Failed to terminate dtm ta\n"); return ret; } + ret = psp_rap_terminate(psp); + if (ret) { + DRM_ERROR("Failed to terminate rap ta\n"); + return ret; + } } ret = psp_asd_unload(psp); @@ -2078,6 +2250,11 @@ static int psp_resume(void *handle) if (ret) dev_err(psp->adev->dev, "DTM: Failed to initialize DTM\n"); + + ret = psp_rap_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "RAP: Failed to initialize RAP\n"); } mutex_unlock(&adev->firmware.mutex); @@ -2343,6 +2520,11 @@ int 
parse_ta_bin_descriptor(struct psp_context *psp, psp->ta_dtm_ucode_size = le32_to_cpu(desc->size_bytes); psp->ta_dtm_start_addr = ucode_start_addr; break; + case TA_FW_TYPE_PSP_RAP: + psp->ta_rap_ucode_version = le32_to_cpu(desc->fw_version); + psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes); + psp->ta_rap_start_addr = ucode_start_addr; + break; default: dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); break; @@ -2421,7 +2603,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); uint32_t fw_ver; int ret; @@ -2448,7 +2630,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); void *cpu_addr; dma_addr_t dma_addr; int ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 623888bf30cb7b2097833204186c400a4970b0a5..919d2fb7427b151358e4aea2be1c33a5e41355c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -29,6 +29,7 @@ #include "psp_gfx_if.h" #include "ta_xgmi_if.h" #include "ta_ras_if.h" +#include "ta_rap_if.h" #define PSP_FENCE_BUFFER_SIZE 0x1000 #define PSP_CMD_BUFFER_SIZE 0x1000 @@ -38,6 +39,7 @@ #define PSP_TMR_SIZE 0x400000 #define PSP_HDCP_SHARED_MEM_SIZE 0x4000 #define PSP_DTM_SHARED_MEM_SIZE 0x4000 +#define PSP_RAP_SHARED_MEM_SIZE 0x4000 #define PSP_SHARED_MEM_SIZE 0x4000 struct psp_context; @@ -159,6 +161,15 @@ struct psp_dtm_context { struct mutex mutex; }; +struct psp_rap_context { + bool rap_initialized; + uint32_t session_id; + struct amdgpu_bo *rap_shared_bo; + uint64_t rap_shared_mc_addr; + void *rap_shared_buf; + struct mutex mutex; +}; + #define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942 #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000 #define GDDR6_MEM_TRAINING_OFFSET 0x8000 @@ -277,11 +288,16 @@ struct psp_context uint32_t ta_dtm_ucode_size; uint8_t *ta_dtm_start_addr; + uint32_t ta_rap_ucode_version; + uint32_t ta_rap_ucode_size; + uint8_t *ta_rap_start_addr; + struct psp_asd_context asd_context; struct psp_xgmi_context xgmi_context; struct psp_ras_context ras; struct psp_hdcp_context hdcp_context; struct psp_dtm_context dtm_context; + struct psp_rap_context rap_context; struct mutex mutex; struct psp_memory_training_context mem_train_ctx; }; @@ -357,6 +373,7 @@ int psp_ras_trigger_error(struct psp_context *psp, int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id); +int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_rlc_autoload_start(struct psp_context *psp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c new file mode 100644 index 0000000000000000000000000000000000000000..8da5356c36f174eddc8e2ef79f00466711c7abc5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c @@ -0,0 +1,127 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_rap.h"
+
+/**
+ * DOC: AMDGPU RAP debugfs test interface
+ *
+ * Usage:
+ * echo <opcode> > /sys/kernel/debug/dri/xxx/rap_test
+ *
+ * opcode:
+ * Currently only opcode 2 is supported by the Linux host driver.
+ * Opcode 2 stands for TA_CMD_RAP__VALIDATE_L0 and is used to
+ * trigger L0 policy validation; see the header file ta_rap_if.h
+ * for more detail.
+ *
+ */
+static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
+		size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+	struct ta_rap_shared_memory *rap_shared_mem;
+	struct ta_rap_cmd_output_data *rap_cmd_output;
+	struct drm_device *dev = adev_to_drm(adev);
+	uint32_t op;
+	int ret;
+
+	if (*pos || size != 2)
+		return -EINVAL;
+
+	ret = kstrtouint_from_user(buf, size, *pos, &op);
+	if (ret)
+		return ret;
+
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(dev->dev);
+		return ret;
+	}
+
+	/* make sure gfx core is on, RAP TA can't handle
+	 * GFX OFF case currently.
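+	 *
+	 * For readability, the TA call below is bracketed as follows
+	 * (this simply restates the code that follows):
+	 *   amdgpu_gfx_off_ctrl(adev, false);  keep the GFX core powered
+	 *   psp_rap_invoke(&adev->psp, op);    run the RAP TA command
+	 *   amdgpu_gfx_off_ctrl(adev, true);   allow GFXOFF again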
+	 */
+	amdgpu_gfx_off_ctrl(adev, false);
+
+	switch (op) {
+	case 2:
+		ret = psp_rap_invoke(&adev->psp, op);
+
+		if (ret == TA_RAP_STATUS__SUCCESS) {
+			dev_info(adev->dev, "RAP L0 validation test succeeded.\n");
+		} else {
+			rap_shared_mem = (struct ta_rap_shared_memory *)
+					 adev->psp.rap_context.rap_shared_buf;
+			rap_cmd_output = &(rap_shared_mem->rap_out_message.output);
+
+			dev_info(adev->dev, "RAP test failed, the output is:\n");
+			dev_info(adev->dev, "\tlast_subsection: 0x%08x.\n",
+				 rap_cmd_output->last_subsection);
+			dev_info(adev->dev, "\tnum_total_validate: 0x%08x.\n",
+				 rap_cmd_output->num_total_validate);
+			dev_info(adev->dev, "\tnum_valid: 0x%08x.\n",
+				 rap_cmd_output->num_valid);
+			dev_info(adev->dev, "\tlast_validate_addr: 0x%08x.\n",
+				 rap_cmd_output->last_validate_addr);
+			dev_info(adev->dev, "\tlast_validate_val: 0x%08x.\n",
+				 rap_cmd_output->last_validate_val);
+			dev_info(adev->dev, "\tlast_validate_val_exptd: 0x%08x.\n",
+				 rap_cmd_output->last_validate_val_exptd);
+		}
+		break;
+	default:
+		dev_info(adev->dev, "Unsupported op id: %d, ", op);
+		dev_info(adev->dev, "only op 2 (L0 validate test) is supported.\n");
+	}
+
+	amdgpu_gfx_off_ctrl(adev, true);
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+
+	return size;
+}
+
+static const struct file_operations amdgpu_rap_debugfs_ops = {
+	.owner = THIS_MODULE,
+	.read = NULL,
+	.write = amdgpu_rap_debugfs_write,
+	.llseek = default_llseek
+};
+
+void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+	if (!adev->psp.rap_context.rap_initialized)
+		return;
+
+	debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
+				adev, &amdgpu_rap_debugfs_ops);
+#endif
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h
new file mode 100644
index 0000000000000000000000000000000000000000..ec6d7632d3a0fc7d4ba0a0b151fa2d9bd55e1079
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + * + */ +#ifndef _AMDGPU_RAP_H +#define _AMDGPU_RAP_H + +#include "amdgpu.h" + +void amdgpu_rap_debugfs_init(struct amdgpu_device *adev); +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index bcce4c0be4623fd0b758c1078fe0b90a832f2deb..e5ea14774c0cc5a47fd48722499745e6796bb844 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -34,6 +34,8 @@ #include "amdgpu_xgmi.h" #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" +static const char *RAS_FS_NAME = "ras"; + const char *ras_error_string[] = { "none", "parity", @@ -62,13 +64,14 @@ const char *ras_block_string[] = { #define ras_err_str(i) (ras_error_string[ffs(i)]) #define ras_block_str(i) (ras_block_string[i]) -#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1 -#define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) /* inject address is 52 bits */ #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) +/* typical ECC bad page rate(1 bad page per 100MB VRAM) */ +#define RAS_BAD_PAGE_RATE (100 * 1024 * 1024ULL) + enum amdgpu_ras_retire_page_reservation { AMDGPU_RAS_RETIRE_PAGE_RESERVED, AMDGPU_RAS_RETIRE_PAGE_PENDING, @@ -367,12 +370,19 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { - struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + struct amdgpu_device *adev = + (struct amdgpu_device *)file_inode(f)->i_private; int ret; - ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control); + ret = amdgpu_ras_eeprom_reset_table( + &(amdgpu_ras_get_context(adev)->eeprom_control)); - return ret == 1 ? 
size : -EIO; + if (ret == 1) { + amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS; + return size; + } else { + return -EIO; + } } static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { @@ -1017,6 +1027,33 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features); } +static void amdgpu_ras_sysfs_add_bad_page_node(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct attribute_group group; + struct bin_attribute *bin_attrs[] = { + &con->badpages_attr, + NULL, + }; + + con->badpages_attr = (struct bin_attribute) { + .attr = { + .name = "gpu_vram_bad_pages", + .mode = S_IRUGO, + }, + .size = 0, + .private = NULL, + .read = amdgpu_ras_sysfs_badpages_read, + }; + + group.name = RAS_FS_NAME; + group.bin_attrs = bin_attrs; + + sysfs_bin_attr_init(bin_attrs[0]); + + sysfs_update_group(&adev->dev->kobj, &group); +} + static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -1024,14 +1061,9 @@ static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev) &con->features_attr.attr, NULL }; - struct bin_attribute *bin_attrs[] = { - &con->badpages_attr, - NULL - }; struct attribute_group group = { - .name = "ras", + .name = RAS_FS_NAME, .attrs = attrs, - .bin_attrs = bin_attrs, }; con->features_attr = (struct device_attribute) { @@ -1042,22 +1074,20 @@ static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev) .show = amdgpu_ras_sysfs_features_read, }; - con->badpages_attr = (struct bin_attribute) { - .attr = { - .name = "gpu_vram_bad_pages", - .mode = S_IRUGO, - }, - .size = 0, - .private = NULL, - .read = amdgpu_ras_sysfs_badpages_read, - }; - sysfs_attr_init(attrs[0]); - sysfs_bin_attr_init(bin_attrs[0]); return sysfs_create_group(&adev->dev->kobj, &group); } +static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + sysfs_remove_file_from_group(&adev->dev->kobj, + &con->badpages_attr.attr, + RAS_FS_NAME); +} + static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -1065,14 +1095,9 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) &con->features_attr.attr, NULL }; - struct bin_attribute *bin_attrs[] = { - &con->badpages_attr, - NULL - }; struct attribute_group group = { - .name = "ras", + .name = RAS_FS_NAME, .attrs = attrs, - .bin_attrs = bin_attrs, }; sysfs_remove_group(&adev->dev->kobj, &group); @@ -1105,7 +1130,7 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, if (sysfs_add_file_to_group(&adev->dev->kobj, &obj->sysfs_attr.attr, - "ras")) { + RAS_FS_NAME)) { put_obj(obj); return -EINVAL; } @@ -1125,7 +1150,7 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, sysfs_remove_file_from_group(&adev->dev->kobj, &obj->sysfs_attr.attr, - "ras"); + RAS_FS_NAME); obj->attr_inuse = 0; put_obj(obj); @@ -1141,6 +1166,9 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) amdgpu_ras_sysfs_remove(adev, &obj->head); } + if (amdgpu_bad_page_threshold != 0) + amdgpu_ras_sysfs_remove_bad_page_node(adev); + amdgpu_ras_sysfs_remove_feature_node(adev); return 0; @@ -1169,9 +1197,9 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) { struct amdgpu_ras 
*con = amdgpu_ras_get_context(adev); - struct drm_minor *minor = adev->ddev->primary; + struct drm_minor *minor = adev_to_drm(adev)->primary; - con->dir = debugfs_create_dir("ras", minor->debugfs_root); + con->dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root); debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir, adev, &amdgpu_ras_debugfs_ctrl_ops); debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir, @@ -1187,6 +1215,13 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) */ debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir, &con->reboot); + + /* + * User could set this not to clean up hardware's error count register + * of RAS IPs during ras recovery. + */ + debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, + con->dir, &con->disable_ras_err_cnt_harvest); } void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, @@ -1211,6 +1246,7 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) { +#if defined(CONFIG_DEBUG_FS) struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; struct ras_fs_if fs_info; @@ -1233,6 +1269,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) amdgpu_ras_debugfs_create(adev, &fs_info); } } +#endif } void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, @@ -1243,13 +1280,13 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, if (!obj || !obj->ent) return; - debugfs_remove(obj->ent); obj->ent = NULL; put_obj(obj); } static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) { +#if defined(CONFIG_DEBUG_FS) struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj, *tmp; @@ -1257,8 +1294,8 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) amdgpu_ras_debugfs_remove(adev, &obj->head); } - debugfs_remove_recursive(con->dir); con->dir = NULL; +#endif } /* debugfs end */ @@ -1268,6 +1305,9 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev) { amdgpu_ras_sysfs_create_feature_node(adev); + if (amdgpu_bad_page_threshold != 0) + amdgpu_ras_sysfs_add_bad_page_node(adev); + return 0; } @@ -1514,23 +1554,28 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) struct amdgpu_device *remote_adev = NULL; struct amdgpu_device *adev = ras->adev; struct list_head device_list, *device_list_handle = NULL; - struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false); - - /* Build list of devices to query RAS related errors */ - if (hive && adev->gmc.xgmi.num_physical_nodes > 1) - device_list_handle = &hive->device_list; - else { - INIT_LIST_HEAD(&device_list); - list_add_tail(&adev->gmc.xgmi.head, &device_list); - device_list_handle = &device_list; - } - list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) { - amdgpu_ras_log_on_err_counter(remote_adev); + if (!ras->disable_ras_err_cnt_harvest) { + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + + /* Build list of devices to query RAS related errors */ + if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { + device_list_handle = &hive->device_list; + } else { + INIT_LIST_HEAD(&device_list); + list_add_tail(&adev->gmc.xgmi.head, &device_list); + device_list_handle = &device_list; + } + + list_for_each_entry(remote_adev, + device_list_handle, gmc.xgmi.head) + amdgpu_ras_log_on_err_counter(remote_adev); + + amdgpu_put_xgmi_hive(hive); } if (amdgpu_device_should_recover_gpu(ras->adev)) - amdgpu_device_gpu_recover(ras->adev, 0); + 
amdgpu_device_gpu_recover(ras->adev, NULL);

	atomic_set(&ras->in_recovery, 0);
}

@@ -1645,7 +1690,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
	int ret = 0;

	/* no bad page record, skip eeprom access */
-	if (!control->num_recs)
+	if (!control->num_recs || (amdgpu_bad_page_threshold == 0))
		return ret;

	bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
@@ -1699,6 +1744,47 @@ static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
	return ret;
}

+static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
+					uint32_t max_length)
+{
+	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+	int tmp_threshold = amdgpu_bad_page_threshold;
+	u64 val;
+
+	/*
+	 * How the bad_page_cnt_threshold value in the ras structure is
+	 * justified:
+	 *
+	 * Generally -1 <= amdgpu_bad_page_threshold <= max record length
+	 * in eeprom, which gives two scenarios:
+	 *
+	 * Bad page retirement enabled:
+	 *  - If amdgpu_bad_page_threshold = -1,
+	 *    bad_page_cnt_threshold = typical value by formula.
+	 *
+	 *  - If the user-supplied value satisfies
+	 *    0 < amdgpu_bad_page_threshold < max record length in eeprom,
+	 *    use it directly.
+	 *
+	 * Bad page retirement disabled:
+	 *  - If amdgpu_bad_page_threshold = 0, bad page retirement
+	 *    is disabled and bad_page_cnt_threshold takes no effect.
+	 */
+
+	if (tmp_threshold < -1)
+		tmp_threshold = -1;
+	else if (tmp_threshold > max_length)
+		tmp_threshold = max_length;
+
+	if (tmp_threshold == -1) {
+		val = adev->gmc.mc_vram_size;
+		do_div(val, RAS_BAD_PAGE_RATE);
+		con->bad_page_cnt_threshold = min(lower_32_bits(val),
+						max_length);
+	} else {
+		con->bad_page_cnt_threshold = tmp_threshold;
+	}
+}
+
 /* called in gpu recovery/init */
 int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
 {
@@ -1708,7 +1794,8 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
	struct amdgpu_bo *bo = NULL;
	int i, ret = 0;

-	if (!con || !con->eh_data)
+	/* Do not reserve bad pages when amdgpu_bad_page_threshold == 0. */
+	if (!con || !con->eh_data || (amdgpu_bad_page_threshold == 0))
		return 0;

	mutex_lock(&con->recovery_lock);
@@ -1776,6 +1863,8 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data **data;
+	uint32_t max_eeprom_records_len = 0;
+	bool exc_err_limit = false;
	int ret;

	if (con)
@@ -1794,8 +1883,15 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
	atomic_set(&con->in_recovery, 0);
	con->adev = adev;

-	ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
-	if (ret)
+	max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
+	amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);
+
+	ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
+	/*
+	 * Bail out when exc_err_limit is true or when ret != 0.
+	 */
+	if (exc_err_limit || ret)
		goto free;

	if (con->eeprom_control.num_recs) {
@@ -1819,6 +1915,15 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
out:
	dev_warn(adev->dev, "Failed to initialize ras recovery!\n");

+	/*
+	 * Except for the error-threshold-exceeded case, other failure
+	 * cases in this function do not fail amdgpu driver init.
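+	 *
+	 * Concretely (restating the code below for clarity):
+	 *   exc_err_limit == true  -> ret = -EINVAL, driver init fails;
+	 *   exc_err_limit == false -> ret = 0, init continues and only
+	 *   RAS recovery stays disabled.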
+	 */
+	if (!exc_err_limit)
+		ret = 0;
+	else
+		ret = -EINVAL;
+
	return ret;
}

@@ -1858,6 +1963,17 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
	return 0;
}

+static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev)
+{
+	if (adev->asic_type != CHIP_VEGA10 &&
+	    adev->asic_type != CHIP_VEGA20 &&
+	    adev->asic_type != CHIP_ARCTURUS &&
+	    adev->asic_type != CHIP_SIENNA_CICHLID)
+		return 1;
+	else
+		return 0;
+}
+
 /*
  * check hardware's ras ability which will be saved in hw_supported.
  * if hardware does not support ras, we can skip some ras initialization and
@@ -1874,8 +1990,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
	*supported = 0;

	if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
-	    (adev->asic_type != CHIP_VEGA20 &&
-	     adev->asic_type != CHIP_ARCTURUS))
+	    amdgpu_ras_check_asic_type(adev))
		return;

	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
@@ -1897,6 +2012,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,

	*supported = amdgpu_ras_enable == 0 ? 0 :
			*hw_supported & amdgpu_ras_mask;
+	adev->ras_features = *supported;
}

int amdgpu_ras_init(struct amdgpu_device *adev)
@@ -1919,9 +2035,9 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
	amdgpu_ras_check_supported(adev, &con->hw_supported,
			&con->supported);
-	if (!con->hw_supported) {
+	if (!con->hw_supported || (adev->asic_type == CHIP_VEGA10)) {
		r = 0;
-		goto err_out;
+		goto release_con;
	}

	con->features = 0;
@@ -1932,25 +2048,25 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
	if (adev->nbio.funcs->init_ras_controller_interrupt) {
		r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
		if (r)
-			goto err_out;
+			goto release_con;
	}

	if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
		r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
		if (r)
-			goto err_out;
+			goto release_con;
	}

	if (amdgpu_ras_fs_init(adev)) {
		r = -EINVAL;
-		goto err_out;
+		goto release_con;
	}

	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
			"hardware ability[%x] ras_mask[%x]\n",
			con->hw_supported, con->supported);
	return 0;
-err_out:
+release_con:
	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

@@ -1978,7 +2094,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
			amdgpu_ras_request_reset_on_boot(adev,
					ras_block->block);
			return 0;
-		} else if (adev->in_suspend || adev->in_gpu_reset) {
+		} else if (adev->in_suspend || amdgpu_in_reset(adev)) {
			/* in the resume phase, if we fail to enable ras,
			 * clean up all ras fs nodes, and disable ras */
			goto cleanup;
@@ -1987,7 +2103,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
	}

	/* in the resume phase, no need to create ras fs node */
-	if (adev->in_suspend || adev->in_gpu_reset)
+	if (adev->in_suspend || amdgpu_in_reset(adev))
		return 0;

	if (ih_info->cb) {
@@ -2145,3 +2261,19 @@ bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)

	return false;
}
+
+bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev)
+{
+	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+	bool exc_err_limit = false;
+
+	if (con && (amdgpu_bad_page_threshold != 0))
+		amdgpu_ras_eeprom_check_err_threshold(&con->eeprom_control,
+						&exc_err_limit);
+
+	/*
+	 * We are only interested in the variable exc_err_limit,
+	 * which says whether the GPU is in a bad state.
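+	 *
+	 * An illustrative caller-side use (a sketch, not part of this
+	 * patch) would be:
+	 *   if (amdgpu_ras_check_err_threshold(adev))
+	 *       return -EHWPOISON;  refuse to bring up a GPU marked BAD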
+ */ + return exc_err_limit; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index b2667342cf674e73439c0061e7725ee844e44ef5..6b8d7bb83bb3c30c33962a5bbf19145c760e81f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -31,6 +31,10 @@ #include "ta_ras_if.h" #include "amdgpu_ras_eeprom.h" +#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0) +#define AMDGPU_RAS_FLAG_INIT_NEED_RESET (0x1 << 1) +#define AMDGPU_RAS_FLAG_SKIP_BAD_PAGE_RESV (0x1 << 2) + enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__UMC = 0, AMDGPU_RAS_BLOCK__SDMA, @@ -336,6 +340,12 @@ struct amdgpu_ras { struct amdgpu_ras_eeprom_control eeprom_control; bool error_query_ready; + + /* bad page count threshold */ + uint32_t bad_page_cnt_threshold; + + /* disable ras error count harvest in recovery */ + bool disable_ras_err_cnt_harvest; }; struct ras_fs_data { @@ -490,6 +500,8 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev); unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, bool is_ce); +bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev); + /* error handling functions */ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, struct eeprom_table_record *bps, int pages); @@ -500,10 +512,14 @@ static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - /* save bad page to eeprom before gpu reset, - * i2c may be unstable in gpu reset + /* + * Save bad page to eeprom before gpu reset, i2c may be unstable + * in gpu reset. + * + * Also, exclude the case when ras recovery issuer is + * eeprom page write itself. */ - if (in_task()) + if (!(ras->flags & AMDGPU_RAS_FLAG_SKIP_BAD_PAGE_RESV) && in_task()) amdgpu_ras_reserve_bad_pages(adev); if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index c0096097bbcf47fa6fa33745df66b1671c6989b0..0e64c39a23722ac82572649a2a63d907ebe75ce6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -46,6 +46,9 @@ #define EEPROM_TABLE_HDR_VAL 0x414d4452 #define EEPROM_TABLE_VER 0x00010000 +/* Bad GPU tag ‘BADG’ */ +#define EEPROM_TABLE_HDR_BAD 0x42414447 + /* Assume 2 Mbit size */ #define EEPROM_SIZE_BYTES 256000 #define EEPROM_PAGE__SIZE_BYTES 256 @@ -56,6 +59,15 @@ #define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev +static bool __is_ras_eeprom_supported(struct amdgpu_device *adev) +{ + if ((adev->asic_type == CHIP_VEGA20) || + (adev->asic_type == CHIP_ARCTURUS)) + return true; + + return false; +} + static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev, uint16_t *i2c_addr) { @@ -213,6 +225,24 @@ static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control, return true; } +static int amdgpu_ras_eeprom_correct_header_tag( + struct amdgpu_ras_eeprom_control *control, + uint32_t header) +{ + unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE]; + struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; + int ret = 0; + + memset(buff, 0, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE); + + mutex_lock(&control->tbl_mutex); + hdr->header = header; + ret = __update_table_header(control, buff); + mutex_unlock(&control->tbl_mutex); + + return ret; +} + int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) { unsigned char buff[EEPROM_ADDRESS_SIZE + 
EEPROM_TABLE_HEADER_SIZE] = { 0 };
@@ -238,12 +268,14 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)

}

-int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
+int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
+			bool *exceed_err_limit)
{
	int ret = 0;
	struct amdgpu_device *adev = to_amdgpu_device(control);
	unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
	struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	struct i2c_msg msg = {
			.addr	= 0,
			.flags	= I2C_M_RD,
@@ -251,6 +283,11 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
			.buf	= buff,
	};

+	*exceed_err_limit = false;
+
+	if (!__is_ras_eeprom_supported(adev))
+		return 0;
+
	/* Verify i2c adapter is initialized */
	if (!adev->pm.smu_i2c.algo)
		return -ENOENT;
@@ -279,6 +316,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
		DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
				control->num_recs);

+	} else if ((hdr->header == EEPROM_TABLE_HDR_BAD) &&
+			(amdgpu_bad_page_threshold != 0)) {
+		if (ras->bad_page_cnt_threshold > control->num_recs) {
+			dev_info(adev->dev, "Using a valid larger bad page "
+				"threshold and correcting the eeprom header tag.\n");
+			ret = amdgpu_ras_eeprom_correct_header_tag(control,
+							EEPROM_TABLE_HDR_VAL);
+		} else {
+			*exceed_err_limit = true;
+			dev_err(adev->dev, "Exceeding the bad_page_threshold parameter, "
+				"disabling the GPU.\n");
+		}
	} else {
		DRM_INFO("Creating new EEPROM table");
@@ -375,6 +424,49 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
	return curr_address;
}

+int amdgpu_ras_eeprom_check_err_threshold(
+				struct amdgpu_ras_eeprom_control *control,
+				bool *exceed_err_limit)
+{
+	struct amdgpu_device *adev = to_amdgpu_device(control);
+	unsigned char buff[EEPROM_ADDRESS_SIZE +
+			EEPROM_TABLE_HEADER_SIZE] = { 0 };
+	struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+	struct i2c_msg msg = {
+			.addr = control->i2c_address,
+			.flags = I2C_M_RD,
+			.len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
+			.buf = buff,
+	};
+	int ret;
+
+	*exceed_err_limit = false;
+
+	if (!__is_ras_eeprom_supported(adev))
+		return 0;
+
+	/* read EEPROM table header */
+	mutex_lock(&control->tbl_mutex);
+	ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+	if (ret < 1) {
+		dev_err(adev->dev, "Failed to read EEPROM table header.\n");
+		goto err;
+	}
+
+	__decode_table_header_from_buff(hdr, &buff[2]);
+
+	if (hdr->header == EEPROM_TABLE_HDR_BAD) {
+		dev_warn(adev->dev, "This GPU is in a BAD status.");
+		dev_warn(adev->dev, "Please retire it or set a bigger "
+			"threshold value when reloading the driver.\n");
+		*exceed_err_limit = true;
+	}
+
+err:
+	mutex_unlock(&control->tbl_mutex);
+	return 0;
+}
+
 int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
					    struct eeprom_table_record *records,
					    bool write,
@@ -383,10 +475,12 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
	int i, ret = 0;
	struct i2c_msg *msgs, *msg;
	unsigned char *buffs, *buff;
+	bool sched_ras_recovery = false;
	struct eeprom_table_record *record;
	struct amdgpu_device *adev = to_amdgpu_device(control);
+	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

-	if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS)
+	if (!__is_ras_eeprom_supported(adev))
		return 0;

	buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE,
@@ -402,11
+496,30 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
		goto free_buff;
	}

+	/*
+	 * If the number of saved bad pages exceeds the bad page threshold
+	 * for the whole VRAM, update the table header to mark the BAD GPU
+	 * tag and schedule one ras recovery after the eeprom write is done,
+	 * so that the latest records are not lost.
+	 *
+	 * This new header will be picked up and checked in the bootup
+	 * by ras recovery, which may break the bootup process to notify
+	 * the user that this GPU is in a bad state and should be retired
+	 * for further check.
+	 */
+	if (write && (amdgpu_bad_page_threshold != 0) &&
+		((control->num_recs + num) >= ras->bad_page_cnt_threshold)) {
+		dev_warn(adev->dev,
+			"Saved bad pages (%d) reach the threshold value (%d).\n",
+			control->num_recs + num, ras->bad_page_cnt_threshold);
+		control->tbl_hdr.header = EEPROM_TABLE_HDR_BAD;
+		sched_ras_recovery = true;
+	}
+
	/* In case of overflow just start from beginning to not lose newest records */
	if (write && (control->next_addr + EEPROM_TABLE_RECORD_SIZE * num > EEPROM_SIZE_BYTES))
		control->next_addr = EEPROM_RECORD_START;
-
	/*
	 * TODO Currently makes EEPROM writes for each record, this creates
	 * internal fragmentation. Optimize the code to do full page writes of
@@ -482,6 +595,20 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
	__update_tbl_checksum(control, records, num, old_hdr_byte_sum);

	__update_table_header(control, buffs);
+
+	if (sched_ras_recovery) {
+		/*
+		 * Before scheduling ras recovery, assert the related
+		 * flag first, which shall bypass the common bad page
+		 * reservation execution in amdgpu_ras_reset_gpu.
+		 */
+		amdgpu_ras_get_context(adev)->flags |=
+				AMDGPU_RAS_FLAG_SKIP_BAD_PAGE_RESV;
+
+		dev_warn(adev->dev, "Conducting ras recovery because the bad "
+			"page threshold was reached.\n");
+		amdgpu_ras_reset_gpu(adev);
+	}
	} else if (!__validate_tbl_checksum(control, records, num)) {
		DRM_WARN("EEPROM Table checksum mismatch!");
		/* TODO Uncomment when EEPROM read/write is reliable */
@@ -499,6 +626,11 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,

	return ret == num ?
0 : -EIO; } +inline uint32_t amdgpu_ras_eeprom_get_record_max_length(void) +{ + return EEPROM_MAX_RECORD_NUM; +} + /* Used for testing if bugs encountered */ #if 0 void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index 9e7d640920fb1cd177db676cb0c411781312d571..c7a5e5c7c61ebb0656000a2eb51f4aa9793b8d75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -76,14 +76,21 @@ struct eeprom_table_record { unsigned char mcumc_id; }__attribute__((__packed__)); -int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control); +int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control, + bool *exceed_err_limit); int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control); +int amdgpu_ras_eeprom_check_err_threshold( + struct amdgpu_ras_eeprom_control *control, + bool *exceed_err_limit); + int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *records, bool write, int num); +inline uint32_t amdgpu_ras_eeprom_get_record_max_length(void); + void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control); #endif // _AMDGPU_RAS_EEPROM_H diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 13ea8ebc421c6e47bad20b525ab525883c3d852f..15ee13c3bd9e191e28eba342db182d82606cc654 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -267,7 +267,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, &ring->sched; } - for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i) + for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i) atomic_set(&ring->num_jobs[i], 0); return 0; @@ -420,7 +420,7 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) { #if defined(CONFIG_DEBUG_FS) - struct drm_minor *minor = adev->ddev->primary; + struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *ent, *root = minor->debugfs_root; char name[32]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index da871d84b74245c30f2ddd6af02472eb67ac0427..7112137689db002b7b73ad600b08578f39b27a57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -243,7 +243,7 @@ struct amdgpu_ring { bool has_compute_vm_bug; bool no_scheduler; - atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX]; + atomic_t num_jobs[DRM_SCHED_PRIORITY_COUNT]; struct mutex priority_mutex; /* protected by priority_mutex */ int priority; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index c799691dfa848ced431fb9082bafef61aa93facc..0da0a0d9867209194b03fd8a6623532bb3235502 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -32,24 +32,32 @@ #include "amdgpu_vm.h" -enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority) +int amdgpu_to_sched_priority(int amdgpu_priority, + enum drm_sched_priority *prio) { switch (amdgpu_priority) { case AMDGPU_CTX_PRIORITY_VERY_HIGH: - return DRM_SCHED_PRIORITY_HIGH_HW; + *prio = DRM_SCHED_PRIORITY_HIGH; + break; case AMDGPU_CTX_PRIORITY_HIGH: - return DRM_SCHED_PRIORITY_HIGH_SW; + *prio = DRM_SCHED_PRIORITY_HIGH; + break; case AMDGPU_CTX_PRIORITY_NORMAL: - return DRM_SCHED_PRIORITY_NORMAL; + *prio = 
DRM_SCHED_PRIORITY_NORMAL; + break; case AMDGPU_CTX_PRIORITY_LOW: case AMDGPU_CTX_PRIORITY_VERY_LOW: - return DRM_SCHED_PRIORITY_LOW; + *prio = DRM_SCHED_PRIORITY_MIN; + break; case AMDGPU_CTX_PRIORITY_UNSET: - return DRM_SCHED_PRIORITY_UNSET; + *prio = DRM_SCHED_PRIORITY_UNSET; + break; default: WARN(1, "Invalid context priority %d\n", amdgpu_priority); - return DRM_SCHED_PRIORITY_INVALID; + return -EINVAL; } + + return 0; } static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, @@ -115,13 +123,24 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { union drm_amdgpu_sched *args = data; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); enum drm_sched_priority priority; int r; - priority = amdgpu_to_sched_priority(args->in.priority); - if (priority == DRM_SCHED_PRIORITY_INVALID) + /* First check the op, then the op's argument. + */ + switch (args->in.op) { + case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE: + case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE: + break; + default: + DRM_ERROR("Invalid sched op specified: %d\n", args->in.op); return -EINVAL; + } + + r = amdgpu_to_sched_priority(args->in.priority, &priority); + if (r) + return r; switch (args->in.op) { case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE: @@ -136,7 +155,8 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, priority); break; default: - DRM_ERROR("Invalid sched op specified: %d\n", args->in.op); + /* Impossible. + */ r = -EINVAL; break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h index 12299fd95691cb2845fd6c4f9bb8c2b1bebd194b..67e5b2472f6a84a740a626fb3c60c525f8affec0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h @@ -30,7 +30,8 @@ enum drm_sched_priority; struct drm_device; struct drm_file; -enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority); +int amdgpu_to_sched_priority(int amdgpu_priority, + enum drm_sched_priority *prio); int amdgpu_sched_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 651365183e75f5fd61c7591f46c870e72e9932cb..6fc3af082f6fdd2e4fd3b0c8e2b1d1ec27ceb12d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1704,8 +1704,8 @@ static struct ttm_bo_driver amdgpu_bo_driver = { */ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) { - amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, - NULL, &adev->fw_vram_usage.va); + amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo, + NULL, &adev->mman.fw_vram_usage_va); } /** @@ -1719,19 +1719,19 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) { uint64_t vram_size = adev->gmc.visible_vram_size; - adev->fw_vram_usage.va = NULL; - adev->fw_vram_usage.reserved_bo = NULL; + adev->mman.fw_vram_usage_va = NULL; + adev->mman.fw_vram_usage_reserved_bo = NULL; - if (adev->fw_vram_usage.size == 0 || - adev->fw_vram_usage.size > vram_size) + if (adev->mman.fw_vram_usage_size == 0 || + adev->mman.fw_vram_usage_size > vram_size) return 0; return amdgpu_bo_create_kernel_at(adev, - adev->fw_vram_usage.start_offset, - adev->fw_vram_usage.size, + adev->mman.fw_vram_usage_start_offset, + adev->mman.fw_vram_usage_size, AMDGPU_GEM_DOMAIN_VRAM, - &adev->fw_vram_usage.reserved_bo, - &adev->fw_vram_usage.va); + 
&adev->mman.fw_vram_usage_reserved_bo, + &adev->mman.fw_vram_usage_va); } /* @@ -1763,7 +1763,7 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev) memset(ctx, 0, sizeof(*ctx)); ctx->c2p_train_data_offset = - ALIGN((adev->gmc.mc_vram_size - adev->discovery_tmr_size - SZ_1M), SZ_1M); + ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M); ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); ctx->train_data_size = @@ -1802,10 +1802,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) * Otherwise, fallback to legacy approach to check and reserve tmr block for ip * discovery data and G6 memory training data respectively */ - adev->discovery_tmr_size = + adev->mman.discovery_tmr_size = amdgpu_atomfirmware_get_fw_reserved_fb_size(adev); - if (!adev->discovery_tmr_size) - adev->discovery_tmr_size = DISCOVERY_TMR_OFFSET; + if (!adev->mman.discovery_tmr_size) + adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET; if (mem_train_support) { /* reserve vram for mem train according to TMR location */ @@ -1825,14 +1825,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) } ret = amdgpu_bo_create_kernel_at(adev, - adev->gmc.real_vram_size - adev->discovery_tmr_size, - adev->discovery_tmr_size, + adev->gmc.real_vram_size - adev->mman.discovery_tmr_size, + adev->mman.discovery_tmr_size, AMDGPU_GEM_DOMAIN_VRAM, - &adev->discovery_memory, + &adev->mman.discovery_memory, NULL); if (ret) { DRM_ERROR("alloc tmr failed(%d)!\n", ret); - amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); return ret; } @@ -1853,15 +1853,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) uint64_t gtt_size; int r; u64 vis_vram_limit; - void *stolen_vga_buf; mutex_init(&adev->mman.gtt_window_lock); /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, - adev->ddev->anon_inode->i_mapping, - adev->ddev->vma_offset_manager, + adev_to_drm(adev)->anon_inode->i_mapping, + adev_to_drm(adev)->vma_offset_manager, dma_addressing_limited(adev->dev)); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); @@ -1906,7 +1905,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) * If IP discovery enabled, a block of memory should be * reserved for IP discovey. */ - if (adev->discovery_bin) { + if (adev->mman.discovery_bin) { r = amdgpu_ttm_reserve_tmr(adev); if (r) return r; @@ -1916,10 +1915,17 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) * This is used for VGA emulation and pre-OS scanout buffers to * avoid display artifacts while transitioning between pre-OS * and driver. 
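	 *
	 * With this change the start of VRAM carries two back-to-back
	 * reservations, created by the two calls below:
	 *   [0, stolen_vga_size)               -> stolen_vga_memory
	 *   [stolen_vga_size,
	 *    stolen_vga_size +
	 *    stolen_extended_size)             -> stolen_extended_memory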
*/ - r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->stolen_vga_memory, - NULL, &stolen_vga_buf); + r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->mman.stolen_vga_memory, + NULL); + if (r) + return r; + r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size, + adev->mman.stolen_extended_size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->mman.stolen_extended_memory, + NULL); if (r) return r; @@ -1975,9 +1981,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) */ void amdgpu_ttm_late_init(struct amdgpu_device *adev) { - void *stolen_vga_buf; /* return the VGA stolen memory (if any) back to VRAM */ - amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf); + if (!adev->mman.keep_stolen_vga_memory) + amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); } /** @@ -1989,8 +1996,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) return; amdgpu_ttm_training_reserve_vram_fini(adev); + /* return the stolen vga memory back to VRAM */ + if (adev->mman.keep_stolen_vga_memory) + amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); /* return the IP Discovery TMR memory back to VRAM */ - amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); amdgpu_ttm_fw_reserve_vram_fini(adev); if (adev->mman.aper_base_kaddr) @@ -2022,7 +2032,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) uint64_t size; int r; - if (!adev->mman.initialized || adev->in_gpu_reset || + if (!adev->mman.initialized || amdgpu_in_reset(adev) || adev->mman.buffer_funcs_enabled == enable) return; @@ -2033,7 +2043,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) ring = adev->mman.buffer_funcs_ring; sched = &ring->sched; r = drm_sched_entity_init(&adev->mman.entity, - DRM_SCHED_PRIORITY_KERNEL, &sched, + DRM_SCHED_PRIORITY_KERNEL, &sched, 1, NULL); if (r) { DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", @@ -2058,7 +2068,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *file_priv = filp->private_data; - struct amdgpu_device *adev = file_priv->minor->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev); if (adev == NULL) return -EINVAL; @@ -2239,7 +2249,7 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *)m->private; unsigned ttm_pl = (uintptr_t)node->info_ent->data; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl); struct drm_printer p = drm_seq_file_printer(m); @@ -2530,7 +2540,7 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) #if defined(CONFIG_DEBUG_FS) unsigned count; - struct drm_minor *minor = adev->ddev->primary; + struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *ent, *root = minor->debugfs_root; for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 4f9426ecf0391d4e0b5a6ab1604f7e190b8289c6..a87951b2f06dd159ecb5e2b860e925d6e1c2a21a 100644 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -73,6 +73,23 @@ struct amdgpu_mman { struct amdgpu_vram_mgr vram_mgr; struct amdgpu_gtt_mgr gtt_mgr; + + uint64_t stolen_vga_size; + struct amdgpu_bo *stolen_vga_memory; + uint64_t stolen_extended_size; + struct amdgpu_bo *stolen_extended_memory; + bool keep_stolen_vga_memory; + + /* discovery */ + uint8_t *discovery_bin; + uint32_t discovery_tmr_size; + struct amdgpu_bo *discovery_memory; + + /* firmware VRAM reservation */ + u64 fw_vram_usage_start_offset; + u64 fw_vram_usage_size; + struct amdgpu_bo *fw_vram_usage_reserved_bo; + void *fw_vram_usage_va; }; struct amdgpu_copy_mem { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 183743c5fb7bf7af69ac1350b69929f8190e6995..55fe19a2f332276b5ef1d581b43c7e4db4909444 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -408,7 +408,7 @@ static ssize_t show_##name(struct device *dev, \ char *buf) \ { \ struct drm_device *ddev = dev_get_drvdata(dev); \ - struct amdgpu_device *adev = ddev->dev_private; \ + struct amdgpu_device *adev = drm_to_adev(ddev); \ \ return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->field); \ } \ @@ -628,7 +628,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) struct amdgpu_firmware_info *ucode = NULL; /* for baremetal, the ucode is allocated in gtt, so don't need to fill the bo when reset/suspend */ - if (!amdgpu_sriov_vf(adev) && (adev->in_gpu_reset || adev->in_suspend)) + if (!amdgpu_sriov_vf(adev) && (amdgpu_in_reset(adev) || adev->in_suspend)) return 0; /* * if SMU loaded firmware, it needn't add SMC, UVD, and VCE diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 12a8bc8fca0b0a1f81051a7777c5aba234858098..3c23c6293ff9472b64e1a9553c73ce96776b3aca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -131,6 +131,7 @@ enum ta_fw_type { TA_FW_TYPE_PSP_RAS, TA_FW_TYPE_PSP_HDCP, TA_FW_TYPE_PSP_DTM, + TA_FW_TYPE_PSP_RAP, }; struct ta_fw_bin_desc { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index af1b1ccf613c98589a266e609951ada186af7ddb..262baf0f61ea0a70c2c8e5b7e2913b3de9468f86 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -125,8 +125,9 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, "detected in UMC block\n", err_data->ue_count); - if (err_data->err_addr_cnt && - amdgpu_ras_add_bad_pages(adev, err_data->err_addr, + if ((amdgpu_bad_page_threshold != 0) && + err_data->err_addr_cnt && + amdgpu_ras_add_bad_pages(adev, err_data->err_addr, err_data->err_addr_cnt)) dev_warn(adev->dev, "Failed to add ras bad page!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index a615a1eb750be4b734f4bd130d9c4ab1ada7f603..18381449365800c70ca8e5855c85af2da94f8454 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -21,6 +21,20 @@ #ifndef __AMDGPU_UMC_H__ #define __AMDGPU_UMC_H__ +/* + * (addr / 256) * 8192, the higher 26 bits in ErrorAddr + * is the index of 8KB block + */ +#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5) +/* channel index is the index of 256B block */ +#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8) +/* offset in 256B block */ +#define OFFSET_IN_256B_BLOCK(addr) ((addr) 
& 0xffULL) + +#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++) +#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++) +#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst)) + struct amdgpu_umc_funcs { void (*err_cnt_init)(struct amdgpu_device *adev); int (*ras_late_init)(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 1203c20491e6d5781234370eaee0b314b5e785f7..f76961d1724602bcea0cdf7b98a422fa29aca4fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -45,7 +45,7 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) if (adev->mode_info.num_crtc == 0) adev->mode_info.num_crtc = 1; adev->enable_virtual_display = true; - adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC; + adev_to_drm(adev)->driver->driver_features &= ~DRIVER_ATOMIC; adev->cg_flags = 0; adev->pg_flags = 0; } @@ -93,7 +93,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, amdgpu_ring_undo(ring); spin_unlock_irqrestore(&kiq->ring_lock, flags); failed_kiq: - pr_err("failed to write reg %x wait reg %x\n", reg0, reg1); + dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1); } /** @@ -401,7 +401,7 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev, if (bp_block_size) { bp_cnt = bp_block_size / sizeof(uint64_t); for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) { - retired_page = *(uint64_t *)(adev->fw_vram_usage.va + + retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va + bp_block_offset + bp_idx * sizeof(uint64_t)); bp.retired_page = retired_page; @@ -428,10 +428,10 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; - if (adev->fw_vram_usage.va != NULL) { + if (adev->mman.fw_vram_usage_va != NULL) { adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *)( - adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET); + adev->mman.fw_vram_usage_va + AMDGIM_DATAEXCHANGE_OFFSET); AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size); AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum); AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index f826945989c727a1a5b23fef67e7541e5d62c86f..b2046c3a404decd1c732d66b0d5af10cd975bb5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -325,9 +325,9 @@ static inline bool is_virtual_machine(void) #define amdgpu_sriov_is_pp_one_vf(adev) \ ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF) #define amdgpu_sriov_is_debug(adev) \ - ((!adev->in_gpu_reset) && adev->virt.tdr_debug) + ((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug) #define amdgpu_sriov_is_normal(adev) \ - ((!adev->in_gpu_reset) && (!adev->virt.tdr_debug)) + ((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug)) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8bc2253939be9fdd401a297bc32d8d8f62f8f07c..420931d36732f6b087dfe5d16c5f5af826cf426d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -28,6 
+28,7 @@ #include #include #include +#include #include #include "amdgpu.h" @@ -35,6 +36,7 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_gmc.h" #include "amdgpu_xgmi.h" +#include "amdgpu_dma_buf.h" /** * DOC: GPUVM @@ -1691,13 +1693,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, uint64_t max_entries; uint64_t addr, last; + max_entries = mapping->last - start + 1; if (nodes) { addr = nodes->start << PAGE_SHIFT; - max_entries = (nodes->size - pfn) * - AMDGPU_GPU_PAGES_IN_CPU_PAGE; + max_entries = min((nodes->size - pfn) * + AMDGPU_GPU_PAGES_IN_CPU_PAGE, max_entries); } else { addr = 0; - max_entries = S64_MAX; } if (pages_addr) { @@ -1727,7 +1729,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, addr += pfn << PAGE_SHIFT; } - last = min((uint64_t)mapping->last, start + max_entries - 1); + last = start + max_entries - 1; r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv, start, last, flags, addr, dma_addr, fence); @@ -1778,15 +1780,24 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, nodes = NULL; resv = vm->root.base.bo->tbo.base.resv; } else { + struct drm_gem_object *obj = &bo->tbo.base; struct ttm_dma_tt *ttm; + resv = bo->tbo.base.resv; + if (obj->import_attach && bo_va->is_xgmi) { + struct dma_buf *dma_buf = obj->import_attach->dmabuf; + struct drm_gem_object *gobj = dma_buf->priv; + struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); + + if (abo->tbo.mem.mem_type == TTM_PL_VRAM) + bo = gem_to_amdgpu_bo(gobj); + } mem = &bo->tbo.mem; nodes = mem->mm_node; if (mem->mem_type == TTM_PL_TT) { ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); pages_addr = ttm->dma_address; } - resv = bo->tbo.base.resv; } if (bo) { @@ -2132,8 +2143,10 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); - if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) && - (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) { + if (!bo) + return bo_va; + + if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) { bo_va->is_xgmi = true; /* Power up XGMI if it can be potentially used */ amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20); @@ -3209,7 +3222,7 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { union drm_amdgpu_vm *args = data; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; long timeout = msecs_to_jiffies(2000); int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 7574be6cd7a06cb5262bddbf795b50352a93540f..b2adc2abc581053ef90af2be0cfe1d086d966f72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -50,7 +50,7 @@ static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size); } @@ -67,7 +67,7 @@ static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); return 
snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size); } @@ -84,8 +84,9 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + return snprintf(buf, PAGE_SIZE, "%llu\n", amdgpu_vram_mgr_usage(man)); } @@ -102,8 +103,9 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + return snprintf(buf, PAGE_SIZE, "%llu\n", amdgpu_vram_mgr_vis_usage(man)); } @@ -113,7 +115,7 @@ static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); switch (adev->gmc.vram_vendor) { case SAMSUNG: @@ -478,7 +480,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, unsigned int pages; int i, r; - *sgt = kmalloc(sizeof(*sg), GFP_KERNEL); + *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL); if (!*sgt) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index e3a3755cb999a3b973f7b919a1228f20978d721e..1162913c8bf42a6f18be07fbf863798be45e3ee6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -35,11 +35,9 @@ static DEFINE_MUTEX(xgmi_mutex); -#define AMDGPU_MAX_XGMI_HIVE 8 #define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4 -static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE]; -static unsigned hive_count = 0; +static LIST_HEAD(xgmi_hive_list); static const int xgmi_pcs_err_status_reg_vg20[] = { smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS, @@ -171,65 +169,53 @@ static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = { * */ +static struct attribute amdgpu_xgmi_hive_id = { + .name = "xgmi_hive_id", + .mode = S_IRUGO +}; -static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct amdgpu_hive_info *hive = - container_of(attr, struct amdgpu_hive_info, dev_attr); - - return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id); -} +static struct attribute *amdgpu_xgmi_hive_attrs[] = { + &amdgpu_xgmi_hive_id, + NULL +}; -static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev, - struct amdgpu_hive_info *hive) +static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj, + struct attribute *attr, char *buf) { - int ret = 0; + struct amdgpu_hive_info *hive = container_of( + kobj, struct amdgpu_hive_info, kobj); - if (WARN_ON(hive->kobj)) - return -EINVAL; - - hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj); - if (!hive->kobj) { - dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n"); - return -EINVAL; - } - - hive->dev_attr = (struct device_attribute) { - .attr = { - .name = "xgmi_hive_id", - .mode = S_IRUGO, - - }, - .show = amdgpu_xgmi_show_hive_id, - }; + if (attr == &amdgpu_xgmi_hive_id) + return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id); - ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr); - if (ret) { - dev_err(adev->dev, "XGMI: Failed to create device file 
xgmi_hive_id\n"); - kobject_del(hive->kobj); - kobject_put(hive->kobj); - hive->kobj = NULL; - } - - return ret; + return 0; } -static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev, - struct amdgpu_hive_info *hive) +static void amdgpu_xgmi_hive_release(struct kobject *kobj) { - sysfs_remove_file(hive->kobj, &hive->dev_attr.attr); - kobject_del(hive->kobj); - kobject_put(hive->kobj); - hive->kobj = NULL; + struct amdgpu_hive_info *hive = container_of( + kobj, struct amdgpu_hive_info, kobj); + + mutex_destroy(&hive->hive_lock); + kfree(hive); } +static const struct sysfs_ops amdgpu_xgmi_hive_ops = { + .show = amdgpu_xgmi_show_attrs, +}; + +struct kobj_type amdgpu_xgmi_hive_type = { + .release = amdgpu_xgmi_hive_release, + .sysfs_ops = &amdgpu_xgmi_hive_ops, + .default_attrs = amdgpu_xgmi_hive_attrs, +}; + static ssize_t amdgpu_xgmi_show_device_id(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id); @@ -241,7 +227,7 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in; uint64_t fica_out; unsigned int error_count = 0; @@ -287,8 +273,8 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, /* Create sysfs link to hive info folder on the first device */ - if (adev != hive->adev) { - ret = sysfs_create_link(&adev->dev->kobj, hive->kobj, + if (hive->kobj.parent != (&adev->dev->kobj)) { + ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj, "xgmi_hive_info"); if (ret) { dev_err(adev->dev, "XGMI: Failed to create link to hive info"); @@ -296,9 +282,9 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, } } - sprintf(node, "node%d", hive->number_devices); + sprintf(node, "node%d", atomic_read(&hive->number_devices)); /* Create sysfs link form the hive folder to yourself */ - ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node); + ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node); if (ret) { dev_err(adev->dev, "XGMI: Failed to create link from hive info"); goto remove_link; @@ -308,7 +294,7 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, remove_link: - sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique); + sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique); remove_file: device_remove_file(adev->dev, &dev_attr_xgmi_device_id); @@ -326,78 +312,96 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev, device_remove_file(adev->dev, &dev_attr_xgmi_device_id); device_remove_file(adev->dev, &dev_attr_xgmi_error); - if (adev != hive->adev) + if (hive->kobj.parent != (&adev->dev->kobj)) sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info"); - sprintf(node, "node%d", hive->number_devices); - sysfs_remove_link(hive->kobj, node); + sprintf(node, "node%d", atomic_read(&hive->number_devices)); + sysfs_remove_link(&hive->kobj, node); } -struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock) +struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) { - int i; - struct amdgpu_hive_info *tmp; + struct amdgpu_hive_info *hive = NULL, *tmp = NULL; + int ret; if (!adev->gmc.xgmi.hive_id) return NULL; + if (adev->hive) 
{ + kobject_get(&adev->hive->kobj); + return adev->hive; + } + mutex_lock(&xgmi_mutex); - for (i = 0 ; i < hive_count; ++i) { - tmp = &xgmi_hives[i]; - if (tmp->hive_id == adev->gmc.xgmi.hive_id) { - if (lock) - mutex_lock(&tmp->hive_lock); - mutex_unlock(&xgmi_mutex); - return tmp; + if (!list_empty(&xgmi_hive_list)) { + list_for_each_entry_safe(hive, tmp, &xgmi_hive_list, node) { + if (hive->hive_id == adev->gmc.xgmi.hive_id) + goto pro_end; } } - if (i >= AMDGPU_MAX_XGMI_HIVE) { - mutex_unlock(&xgmi_mutex); - return NULL; + + hive = kzalloc(sizeof(*hive), GFP_KERNEL); + if (!hive) { + dev_err(adev->dev, "XGMI: allocation failed\n"); + hive = NULL; + goto pro_end; } /* initialize new hive if not exist */ - tmp = &xgmi_hives[hive_count++]; - - if (amdgpu_xgmi_sysfs_create(adev, tmp)) { - mutex_unlock(&xgmi_mutex); - return NULL; + ret = kobject_init_and_add(&hive->kobj, + &amdgpu_xgmi_hive_type, + &adev->dev->kobj, + "%s", "xgmi_hive_info"); + if (ret) { + dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n"); + kfree(hive); + hive = NULL; + goto pro_end; } - tmp->adev = adev; - tmp->hive_id = adev->gmc.xgmi.hive_id; - INIT_LIST_HEAD(&tmp->device_list); - mutex_init(&tmp->hive_lock); - mutex_init(&tmp->reset_lock); - task_barrier_init(&tmp->tb); - - if (lock) - mutex_lock(&tmp->hive_lock); - tmp->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN; - tmp->hi_req_gpu = NULL; + hive->hive_id = adev->gmc.xgmi.hive_id; + INIT_LIST_HEAD(&hive->device_list); + INIT_LIST_HEAD(&hive->node); + mutex_init(&hive->hive_lock); + atomic_set(&hive->in_reset, 0); + atomic_set(&hive->number_devices, 0); + task_barrier_init(&hive->tb); + hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN; + hive->hi_req_gpu = NULL; /* * hive pstate on boot is high in vega20 so we have to go to low * pstate on after boot. */ - tmp->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE; + hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE; + list_add_tail(&hive->node, &xgmi_hive_list); + +pro_end: + if (hive) + kobject_get(&hive->kobj); mutex_unlock(&xgmi_mutex); + return hive; +} - return tmp; +void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive) +{ + if (hive) + kobject_put(&hive->kobj); } int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate) { int ret = 0; - struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0); + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); struct amdgpu_device *request_adev = hive->hi_req_gpu ? 
hive->hi_req_gpu : adev; bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20; bool init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN; + amdgpu_put_xgmi_hive(hive); /* fw bug so temporarily disable pstate switching */ return 0; @@ -449,7 +453,7 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev /* Each psp need to set the latest topology */ ret = psp_xgmi_set_topology_info(&adev->psp, - hive->number_devices, + atomic_read(&hive->number_devices), &adev->psp.xgmi_context.top_info); if (ret) dev_err(adev->dev, @@ -511,7 +515,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16; } - hive = amdgpu_get_xgmi_hive(adev, 1); + hive = amdgpu_get_xgmi_hive(adev); if (!hive) { ret = -EINVAL; dev_err(adev->dev, @@ -519,6 +523,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id); goto exit; } + mutex_lock(&hive->hive_lock); top_info = &adev->psp.xgmi_context.top_info; @@ -526,7 +531,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) list_for_each_entry(entry, &hive->device_list, head) top_info->nodes[count++].node_id = entry->node_id; top_info->num_nodes = count; - hive->number_devices = count; + atomic_set(&hive->number_devices, count); task_barrier_add_task(&hive->tb); @@ -541,7 +546,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) } ret = amdgpu_xgmi_update_topology(hive, tmp_adev); if (ret) - goto exit; + goto exit_unlock; } /* get latest topology info for each device from psp */ @@ -554,7 +559,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) tmp_adev->gmc.xgmi.node_id, tmp_adev->gmc.xgmi.hive_id, ret); /* To do : continue with some node failed or disable the whole hive */ - goto exit; + goto exit_unlock; } } } @@ -562,39 +567,51 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) if (!ret) ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive); - +exit_unlock: mutex_unlock(&hive->hive_lock); exit: - if (!ret) + if (!ret) { + adev->hive = hive; dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n", adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id); - else + } else { + amdgpu_put_xgmi_hive(hive); dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n", adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id, ret); + } return ret; } int amdgpu_xgmi_remove_device(struct amdgpu_device *adev) { - struct amdgpu_hive_info *hive; + struct amdgpu_hive_info *hive = adev->hive; if (!adev->gmc.xgmi.supported) return -EINVAL; - hive = amdgpu_get_xgmi_hive(adev, 1); if (!hive) return -EINVAL; + mutex_lock(&hive->hive_lock); task_barrier_rem_task(&hive->tb); amdgpu_xgmi_sysfs_rem_dev_info(adev, hive); + if (hive->hi_req_gpu == adev) + hive->hi_req_gpu = NULL; + list_del(&adev->gmc.xgmi.head); mutex_unlock(&hive->hive_lock); - if(!(--hive->number_devices)){ - amdgpu_xgmi_sysfs_destroy(adev, hive); - mutex_destroy(&hive->hive_lock); - mutex_destroy(&hive->reset_lock); + amdgpu_put_xgmi_hive(hive); + adev->hive = NULL; + + if (atomic_dec_return(&hive->number_devices) == 0) { + /* Remove the hive from global hive list */ + mutex_lock(&xgmi_mutex); + list_del(&hive->node); + mutex_unlock(&xgmi_mutex); + + amdgpu_put_xgmi_hive(hive); } return psp_xgmi_terminate(&adev->psp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 6999eab16a72090c184736d432ffc857e4a935d6..148560d63554364e7cc1485c57c0fe00724f0440 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -27,13 +27,13 @@ struct amdgpu_hive_info { - uint64_t hive_id; - struct list_head device_list; - int number_devices; - struct mutex hive_lock, reset_lock; - struct kobject *kobj; - struct device_attribute dev_attr; - struct amdgpu_device *adev; + struct kobject kobj; + uint64_t hive_id; + struct list_head device_list; + struct list_head node; + atomic_t number_devices; + struct mutex hive_lock; + atomic_t in_reset; int hi_req_count; struct amdgpu_device *hi_req_gpu; struct task_barrier tb; @@ -50,7 +50,8 @@ struct amdgpu_pcs_ras_field { uint32_t pcs_err_shift; }; -struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock); +struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev); +void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive); int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev); int amdgpu_xgmi_add_device(struct amdgpu_device *adev); int amdgpu_xgmi_remove_device(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c index 847ca9b3ce4eabe0786884f84d5ad21c1ce843b0..3ea5578643200f2d89d72938f05f9b982c996e96 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c @@ -73,6 +73,7 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: athub_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); athub_update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 213e62a28ba034a495b2574fad65d28ca422854d..159a2a4385a1eb6851eb56d06c2fc8bf1d99ece2 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -41,7 +41,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); SET_CRTC_OVERSCAN_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); @@ -84,7 +84,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc, void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); ENABLE_SCALER_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); @@ -114,7 +114,7 @@ void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int index = GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters); ENABLE_CRTC_PS_ALLOCATION args; @@ -131,7 +131,7 @@ void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC); ENABLE_CRTC_PS_ALLOCATION args; @@ -147,7 
+147,7 @@ void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC); BLANK_CRTC_PS_ALLOCATION args; @@ -163,7 +163,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; @@ -192,7 +192,7 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc, { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); SET_CRTC_USING_DTD_TIMING_PARAMETERS args; int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming); u16 misc = 0; @@ -307,7 +307,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc, { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_encoder *encoder = amdgpu_crtc->encoder; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -588,7 +588,7 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc, struct amdgpu_atom_ss *ss) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u8 frev, crev; int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); union set_pixel_clock args; @@ -749,7 +749,7 @@ int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc, { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(amdgpu_crtc->encoder); int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder); @@ -818,7 +818,7 @@ void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(amdgpu_crtc->encoder); u32 pll_clock = mode->clock; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 900b2727f432aeebf89e443de3b56465d98edbb7..a3ba9ca11e98fe550fa32c98d631048f7f8a13bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -60,7 +60,7 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan, u8 delay, u8 *ack) { struct drm_device *dev = chan->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); union aux_channel_transaction args; int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); unsigned char *base; @@ -305,7 +305,7 @@ static u8 
amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev, u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector) { struct drm_device *dev = amdgpu_connector->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); return amdgpu_atombios_dp_encoder_service(adev, ATOM_DP_ACTION_GET_SINK_TYPE, 0, amdgpu_connector->ddc_bus->rec.i2c_id, 0); @@ -718,7 +718,7 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_connector *amdgpu_connector; struct amdgpu_connector_atom_dig *dig_connector; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 1e94a9b652f70d0a46eab130c2c72fab83f8a5cb..8339c8c3a328f7ff9bef199664b3c801f0123a7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -70,7 +70,7 @@ u8 amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder) { struct drm_device *dev = amdgpu_encoder->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) return 0; @@ -84,7 +84,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode { struct drm_encoder *encoder = &amdgpu_encoder->base; struct drm_device *dev = amdgpu_encoder->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder_atom_dig *dig; if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) @@ -152,7 +152,7 @@ amdgpu_atombios_encoder_get_backlight_brightness(struct backlight_device *bd) struct amdgpu_backlight_privdata *pdata = bl_get_data(bd); struct amdgpu_encoder *amdgpu_encoder = pdata->encoder; struct drm_device *dev = amdgpu_encoder->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); } @@ -166,7 +166,7 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode struct drm_connector *drm_connector) { struct drm_device *dev = amdgpu_encoder->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct backlight_device *bd; struct backlight_properties props; struct amdgpu_backlight_privdata *pdata; @@ -229,7 +229,7 @@ void amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder) { struct drm_device *dev = amdgpu_encoder->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct backlight_device *bd = NULL; struct amdgpu_encoder_atom_dig *dig; @@ -319,7 +319,7 @@ static void amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); DAC_ENCODER_CONTROL_PS_ALLOCATION args; int index = 0; @@ -382,7 +382,7 @@ static void amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action) { struct drm_device 
*dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); union dvo_encoder_control args; int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); @@ -573,7 +573,7 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder, int action, int panel_mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -762,7 +762,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a uint8_t lane_num, uint8_t lane_set) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1178,7 +1178,7 @@ amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector, { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct drm_device *dev = amdgpu_connector->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); union dig_transmitter_control args; int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); uint8_t frev, crev; @@ -1225,7 +1225,7 @@ amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder, int action) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder *ext_amdgpu_encoder = to_amdgpu_encoder(ext_encoder); union external_encoder_control args; @@ -1466,7 +1466,7 @@ void amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); union crtc_source_param args; @@ -1673,7 +1673,7 @@ amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder) void amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_encoder *encoder; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { @@ -1701,7 +1701,7 @@ amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); @@ -1751,7 +1751,7 @@ amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); uint32_t bios_0_scratch; @@ -1790,7 +1790,7 @@ amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder); @@ -1848,7 +1848,7 @@ amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector, bool connected) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); @@ -1999,7 +1999,7 @@ struct amdgpu_encoder_atom_dig * amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder) { struct drm_device *dev = encoder->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_mode_info *mode_info = &adev->mode_info; int index = GetIndexIntoMasterTable(DATA, LVDS_Info); uint16_t data_offset, misc; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c index b4cc7c55fa16f6137d27f641cc2594eecdb70ecf..09a538465ffdc5a9c9458071fca19cd90fd1c474 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c @@ -40,7 +40,7 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan, u8 *buf, u8 num) { struct drm_device *dev = chan->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); unsigned char *base; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index c2c67ab68a43b12c5294c3e30231d445babb8f55..03ff8bd1fee84df67998a4be24adc8e3ed26c526 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1366,8 +1366,10 @@ static int cik_asic_reset(struct amdgpu_device *adev) int r; if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { + dev_info(adev->dev, "BACO reset\n"); r = amdgpu_dpm_baco_reset(adev); } else { + dev_info(adev->dev, "PCI CONFIG reset\n"); r = cik_asic_pci_config_reset(adev); } @@ -1919,6 +1921,10 @@ static uint64_t cik_get_pcie_replay_count(struct amdgpu_device *adev) return (nak_r + nak_g); } +static void cik_pre_asic_init(struct amdgpu_device *adev) +{ +} + static const struct amdgpu_asic_funcs cik_asic_funcs = { .read_disabled_bios = &cik_read_disabled_bios, @@ -1939,6 +1945,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .need_reset_on_init = &cik_need_reset_on_init, .get_pcie_replay_count = &cik_get_pcie_replay_count, .supports_baco = &cik_asic_supports_baco, + .pre_asic_init = &cik_pre_asic_init, }; static int cik_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 84b45a019a36d34cc27ae039127bba7ba033520a..5963cbe0d455c23e98de71a498d1748a0b83992d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -328,7 +328,7 @@ static void 
dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev, */ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; u32 tmp; @@ -383,7 +383,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) */ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; u32 tmp; @@ -504,7 +504,7 @@ void dce_v10_0_disable_dce(struct amdgpu_device *adev) static void dce_v10_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -1209,7 +1209,7 @@ static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *ad static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(encoder->dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 tmp; @@ -1226,7 +1226,7 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1272,7 +1272,7 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1328,7 +1328,7 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1483,7 +1483,7 @@ static void dce_v10_0_audio_fini(struct amdgpu_device *adev) static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; @@ -1519,7 +1519,7 @@ static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, size_t size) { struct drm_device *dev = 
encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; uint8_t *frame = buffer + 3; @@ -1538,7 +1538,7 @@ static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); @@ -1569,7 +1569,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -1749,7 +1749,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder, static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; @@ -1822,7 +1822,7 @@ static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u32 vga_control; vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; @@ -1836,7 +1836,7 @@ static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); if (enable) WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); @@ -1850,7 +1850,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct amdgpu_bo *abo; @@ -2095,7 +2095,7 @@ static void dce_v10_0_set_interleave(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); u32 tmp; @@ -2111,7 +2111,7 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u16 *r, *g, *b; int i; u32 tmp; @@ -2250,7 +2250,7 @@ static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - 
struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u32 pll_in_use; int pll; @@ -2285,7 +2285,7 @@ static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc) static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock) { - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); uint32_t cur_lock; @@ -2300,7 +2300,7 @@ static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock) static void dce_v10_0_hide_cursor(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); u32 tmp; tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); @@ -2311,7 +2311,7 @@ static void dce_v10_0_hide_cursor(struct drm_crtc *crtc) static void dce_v10_0_show_cursor(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); u32 tmp; WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, @@ -2329,7 +2329,7 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc, int x, int y) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); int xorigin = 0, yorigin = 0; amdgpu_crtc->cursor_x = x; @@ -2503,7 +2503,7 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); unsigned type; @@ -2557,7 +2557,7 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_atom_ss ss; int i; @@ -2701,7 +2701,7 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) if (amdgpu_crtc == NULL) return -ENOMEM; - drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs); + drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs); drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); amdgpu_crtc->crtc_id = index; @@ -2709,8 +2709,8 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) amdgpu_crtc->max_cursor_width = 128; amdgpu_crtc->max_cursor_height = 128; - adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; - adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; + adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; + adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; switch (amdgpu_crtc->crtc_id) { case 0: @@ -2792,24 +2792,24 @@ static int dce_v10_0_sw_init(void *handle) if (r) return r; - adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs; - adev->ddev->mode_config.async_page_flip = true; + adev_to_drm(adev)->mode_config.async_page_flip = true; - adev->ddev->mode_config.max_width = 16384; - adev->ddev->mode_config.max_height = 16384; + adev_to_drm(adev)->mode_config.max_width = 16384; + 
adev_to_drm(adev)->mode_config.max_height = 16384; - adev->ddev->mode_config.preferred_depth = 24; - adev->ddev->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.preferred_depth = 24; + adev_to_drm(adev)->mode_config.prefer_shadow = 1; - adev->ddev->mode_config.fb_base = adev->gmc.aper_base; + adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); if (r) return r; - adev->ddev->mode_config.max_width = 16384; - adev->ddev->mode_config.max_height = 16384; + adev_to_drm(adev)->mode_config.max_width = 16384; + adev_to_drm(adev)->mode_config.max_height = 16384; /* allocate crtcs */ for (i = 0; i < adev->mode_info.num_crtc; i++) { @@ -2819,7 +2819,7 @@ static int dce_v10_0_sw_init(void *handle) } if (amdgpu_atombios_get_connector_info_from_object_table(adev)) - amdgpu_display_print_display_setup(adev->ddev); + amdgpu_display_print_display_setup(adev_to_drm(adev)); else return -EINVAL; @@ -2832,7 +2832,7 @@ static int dce_v10_0_sw_init(void *handle) if (r) return r; - drm_kms_helper_poll_init(adev->ddev); + drm_kms_helper_poll_init(adev_to_drm(adev)); adev->mode_info.mode_config_initialized = true; return 0; @@ -2844,13 +2844,13 @@ static int dce_v10_0_sw_fini(void *handle) kfree(adev->mode_info.bios_hardcoded_edid); - drm_kms_helper_poll_fini(adev->ddev); + drm_kms_helper_poll_fini(adev_to_drm(adev)); dce_v10_0_audio_fini(adev); dce_v10_0_afmt_fini(adev); - drm_mode_config_cleanup(adev->ddev); + drm_mode_config_cleanup(adev_to_drm(adev)); adev->mode_info.mode_config_initialized = false; return 0; @@ -3157,14 +3157,14 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, if (amdgpu_crtc == NULL) return 0; - spin_lock_irqsave(&adev->ddev->event_lock, flags); + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); works = amdgpu_crtc->pflip_works; if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " "AMDGPU_FLIP_SUBMITTED(%d)\n", amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); return 0; } @@ -3176,7 +3176,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, if (works->event) drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); @@ -3245,7 +3245,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); if (amdgpu_irq_enabled(adev, source, irq_type)) { - drm_handle_vblank(adev->ddev, crtc); + drm_handle_vblank(adev_to_drm(adev), crtc); } DRM_DEBUG("IH: D%d vblank\n", crtc + 1); @@ -3345,7 +3345,7 @@ dce_v10_0_encoder_mode_set(struct drm_encoder *encoder, static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(encoder->dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -3385,7 +3385,7 @@ static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder) static void dce_v10_0_encoder_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = 
drm_to_adev(dev); /* need to call this here as we need the crtc set up */ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); @@ -3485,7 +3485,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev, uint32_t supported_device, u16 caps) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 01ce52266966a3f9388fe7c84f8b3f80efe0447a..1954472c8e8f678452d1cdef0d254373dea80ad6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -346,7 +346,7 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev, */ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; u32 tmp; @@ -400,7 +400,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) */ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; u32 tmp; @@ -530,7 +530,7 @@ void dce_v11_0_disable_dce(struct amdgpu_device *adev) static void dce_v11_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); @@ -1235,7 +1235,7 @@ static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *ad static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(encoder->dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 tmp; @@ -1252,7 +1252,7 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1298,7 +1298,7 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; @@ -1354,7 +1354,7 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 01ce52266966a3f9388fe7c84f8b3f80efe0447a..1954472c8e8f678452d1cdef0d254373dea80ad6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -346,7 +346,7 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
  */
 static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	u32 tmp;
@@ -400,7 +400,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
  */
 static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	u32 tmp;
@@ -530,7 +530,7 @@ void dce_v11_0_disable_dce(struct amdgpu_device *adev)
 static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1235,7 +1235,7 @@ static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *ad
 
 static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
 {
-	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	u32 tmp;
@@ -1252,7 +1252,7 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
 					struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
@@ -1298,7 +1298,7 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
 
 static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
@@ -1354,7 +1354,7 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
 static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
@@ -1525,7 +1525,7 @@ static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
 static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1561,7 +1561,7 @@ static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
 					  void *buffer, size_t size)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	uint8_t *frame = buffer + 3;
@@ -1580,7 +1580,7 @@ static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
 static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1611,7 +1611,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
 				 struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1791,7 +1791,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
 static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1864,7 +1864,7 @@ static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	u32 vga_control;
 
 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1878,7 +1878,7 @@ static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 
 	if (enable)
 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -1892,7 +1892,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *abo;
@@ -2137,7 +2137,7 @@ static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
 				    struct drm_display_mode *mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	u32 tmp;
@@ -2153,7 +2153,7 @@ static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	u16 *r, *g, *b;
 	int i;
 	u32 tmp;
@@ -2283,7 +2283,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	u32 pll_in_use;
 	int pll;
@@ -2364,7 +2364,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
 
 static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 {
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	uint32_t cur_lock;
@@ -2379,7 +2379,7 @@ static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	u32 tmp;
 
 	tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
@@ -2390,7 +2390,7 @@ static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
 static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	u32 tmp;
 
 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2408,7 +2408,7 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
 				       int x, int y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	int xorigin = 0, yorigin = 0;
 
 	amdgpu_crtc->cursor_x = x;
@@ -2582,7 +2582,7 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
 static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	unsigned type;
@@ -2636,7 +2636,7 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_atom_ss ss;
 	int i;
@@ -2706,7 +2706,7 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 
 	if (!amdgpu_crtc->adjusted_clock)
 		return -EINVAL;
@@ -2809,7 +2809,7 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
 	if (amdgpu_crtc == NULL)
 		return -ENOMEM;
 
-	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
+	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
 
 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
 	amdgpu_crtc->crtc_id = index;
@@ -2817,8 +2817,8 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
 	amdgpu_crtc->max_cursor_width = 128;
 	amdgpu_crtc->max_cursor_height = 128;
-	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
 
 	switch (amdgpu_crtc->crtc_id) {
 	case 0:
@@ -2913,24 +2913,24 @@ static int dce_v11_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
 
-	adev->ddev->mode_config.async_page_flip = true;
+	adev_to_drm(adev)->mode_config.async_page_flip = true;
 
-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;
 
-	adev->ddev->mode_config.preferred_depth = 24;
-	adev->ddev->mode_config.prefer_shadow = 1;
+	adev_to_drm(adev)->mode_config.preferred_depth = 24;
+	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
-	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
 	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;
 
-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;
 
 	/* allocate crtcs */
@@ -2941,7 +2941,7 @@ static int dce_v11_0_sw_init(void *handle)
 	}
 
 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-		amdgpu_display_print_display_setup(adev->ddev);
+		amdgpu_display_print_display_setup(adev_to_drm(adev));
 	else
 		return -EINVAL;
 
@@ -2954,7 +2954,7 @@ static int dce_v11_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	drm_kms_helper_poll_init(adev->ddev);
+	drm_kms_helper_poll_init(adev_to_drm(adev));
 
 	adev->mode_info.mode_config_initialized = true;
 	return 0;
@@ -2966,13 +2966,13 @@ static int dce_v11_0_sw_fini(void *handle)
 
 	kfree(adev->mode_info.bios_hardcoded_edid);
 
-	drm_kms_helper_poll_fini(adev->ddev);
+	drm_kms_helper_poll_fini(adev_to_drm(adev));
 
 	dce_v11_0_audio_fini(adev);
 
 	dce_v11_0_afmt_fini(adev);
 
-	drm_mode_config_cleanup(adev->ddev);
+	drm_mode_config_cleanup(adev_to_drm(adev));
 	adev->mode_info.mode_config_initialized = false;
 
 	return 0;
@@ -3283,14 +3283,14 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
 	if(amdgpu_crtc == NULL)
 		return 0;
 
-	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 	works = amdgpu_crtc->pflip_works;
 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
 				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
 				 amdgpu_crtc->pflip_status,
 				 AMDGPU_FLIP_SUBMITTED);
-		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 		return 0;
 	}
 
@@ -3302,7 +3302,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
 	if(works->event)
 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
-	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
 	drm_crtc_vblank_put(&amdgpu_crtc->base);
 	schedule_work(&works->unpin_work);
@@ -3372,7 +3372,7 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
-			drm_handle_vblank(adev->ddev, crtc);
+			drm_handle_vblank(adev_to_drm(adev), crtc);
 		}
 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
 
@@ -3471,7 +3471,7 @@ dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
 
 static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
 {
-	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 
@@ -3511,7 +3511,7 @@ static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
 static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 
 	/* need to call this here as we need the crtc set up */
 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3611,7 +3611,7 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
 			       uint32_t supported_device,
 			       u16 caps)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_encoder *encoder;
 	struct amdgpu_encoder *amdgpu_encoder;
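The pageflip_irq hunks in dce_v10_0.c and dce_v11_0.c above (and their dce_v6_0.c and dce_v8_0.c twins below) all guard the same completion sequence with the DRM event lock. Stripped to its shape, with the unpin bookkeeping elided, the pattern these hunks convert is:

	unsigned long flags;
	struct amdgpu_flip_work *works;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		/* no flip in flight; drop the lock and bail */
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return 0;
	}

	/* hand the completion event to userspace while still holding the lock */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

Only the route to the drm_device changes; the locking and event hand-off are untouched.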
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index cbddead3dafb103b2f7a24cdc417f591968b30af..3a44753a80d108e5a2f468586907ae75b6d763b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -279,7 +279,7 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
  */
 static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	u32 tmp;
@@ -324,7 +324,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
  */
 static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	u32 tmp;
@@ -401,7 +401,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1114,7 +1114,7 @@ static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *ade
 
 static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
 {
-	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1130,7 +1130,7 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
 					struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
@@ -1174,7 +1174,7 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
 static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
@@ -1235,7 +1235,7 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
@@ -1392,7 +1392,7 @@ static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
 static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	u32 tmp;
@@ -1408,7 +1408,7 @@ static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
 				   uint32_t clock, int bpc)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1446,7 +1446,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
 					     struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1488,7 +1488,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
 static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
 	u32 tmp;
@@ -1522,7 +1522,7 @@ static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	u32 tmp;
@@ -1566,7 +1566,7 @@ static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
 static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	u32 tmp;
@@ -1579,7 +1579,7 @@ static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
 static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	u32 tmp;
@@ -1616,7 +1616,7 @@ static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
 static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	u32 tmp;
@@ -1645,7 +1645,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
 				  struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
@@ -1714,7 +1714,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1788,7 +1788,7 @@ static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	u32 vga_control;
 
 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1799,7 +1799,7 @@ static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 
 	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
 }
@@ -1810,7 +1810,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *abo;
@@ -2033,7 +2033,7 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
 				    struct drm_display_mode *mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2048,7 +2048,7 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	u16 *r, *g, *b;
 	int i;
@@ -2148,7 +2148,7 @@ static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	u32 pll_in_use;
 	int pll;
@@ -2177,7 +2177,7 @@ static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
 
 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 {
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	uint32_t cur_lock;
@@ -2192,7 +2192,7 @@ static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 
 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
@@ -2204,7 +2204,7 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
 static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 
 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(amdgpu_crtc->cursor_addr));
@@ -2222,7 +2222,7 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
 				       int x, int y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	int xorigin = 0, yorigin = 0;
 
 	int w = amdgpu_crtc->cursor_width;
@@ -2397,7 +2397,7 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	unsigned type;
@@ -2447,7 +2447,7 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_atom_ss ss;
 	int i;
@@ -2591,7 +2591,7 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
 	if (amdgpu_crtc == NULL)
 		return -ENOMEM;
 
-	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
 
 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
 	amdgpu_crtc->crtc_id = index;
@@ -2599,8 +2599,8 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
 	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
 	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
-	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
 
 	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
@@ -2669,20 +2669,20 @@ static int dce_v6_0_sw_init(void *handle)
 	adev->mode_info.mode_config_initialized = true;
 
-	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
-	adev->ddev->mode_config.async_page_flip = true;
-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
-	adev->ddev->mode_config.preferred_depth = 24;
-	adev->ddev->mode_config.prefer_shadow = 1;
-	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
+	adev_to_drm(adev)->mode_config.async_page_flip = true;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.preferred_depth = 24;
+	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
 	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;
 
-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;
 
 	/* allocate crtcs */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2693,7 +2693,7 @@ static int dce_v6_0_sw_init(void *handle)
 
 	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
 	if (ret)
-		amdgpu_display_print_display_setup(adev->ddev);
+		amdgpu_display_print_display_setup(adev_to_drm(adev));
 	else
 		return -EINVAL;
 
@@ -2706,7 +2706,7 @@ static int dce_v6_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	drm_kms_helper_poll_init(adev->ddev);
+	drm_kms_helper_poll_init(adev_to_drm(adev));
 
 	return r;
 }
@@ -2717,12 +2717,12 @@ static int dce_v6_0_sw_fini(void *handle)
 
 	kfree(adev->mode_info.bios_hardcoded_edid);
 
-	drm_kms_helper_poll_fini(adev->ddev);
+	drm_kms_helper_poll_fini(adev_to_drm(adev));
 
 	dce_v6_0_audio_fini(adev);
 	dce_v6_0_afmt_fini(adev);
 
-	drm_mode_config_cleanup(adev->ddev);
+	drm_mode_config_cleanup(adev_to_drm(adev));
 	adev->mode_info.mode_config_initialized = false;
 
 	return 0;
@@ -2967,7 +2967,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
-			drm_handle_vblank(adev->ddev, crtc);
+			drm_handle_vblank(adev_to_drm(adev), crtc);
 		}
 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
 		break;
@@ -3036,14 +3036,14 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
 	if (amdgpu_crtc == NULL)
 		return 0;
 
-	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 	works = amdgpu_crtc->pflip_works;
 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
 				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
 				 amdgpu_crtc->pflip_status,
 				 AMDGPU_FLIP_SUBMITTED);
-		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 		return 0;
 	}
 
@@ -3055,7 +3055,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
 	if (works->event)
 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
-	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
 	drm_crtc_vblank_put(&amdgpu_crtc->base);
 	schedule_work(&works->unpin_work);
@@ -3146,7 +3146,7 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
 
 static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
 {
-	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 
@@ -3187,7 +3187,7 @@ static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 
 	/* need to call this here as we need the crtc set up */
 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3297,7 +3297,7 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
 			       uint32_t supported_device,
 			       u16 caps)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_encoder *encoder;
 	struct amdgpu_encoder *amdgpu_encoder;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index fa0ad50b628c38fa2234f6719c9b7b788be5fd2e..3603e5f13077568db62e930ae3dfdbcb88db5b8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -273,7 +273,7 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
  */
 static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	u32 tmp;
@@ -318,7 +318,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
  */
 static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	u32 tmp;
@@ -444,7 +444,7 @@ void dce_v8_0_disable_dce(struct amdgpu_device *adev)
 static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1146,7 +1146,7 @@ static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *ade
 
 static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
 {
-	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	u32 offset;
@@ -1164,7 +1164,7 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
 					struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
@@ -1225,7 +1225,7 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
 static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector;
@@ -1278,7 +1278,7 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
 static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	u32 offset;
@@ -1446,7 +1446,7 @@ static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
 static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1469,7 +1469,7 @@ static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
 					  void *buffer, size_t size)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	uint32_t offset = dig->afmt->offset;
@@ -1489,7 +1489,7 @@ static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
 static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1516,7 +1516,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
 				  struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1678,7 +1678,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
 static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1751,7 +1751,7 @@ static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	u32 vga_control;
 
 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1765,7 +1765,7 @@ static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 
 	if (enable)
 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -1779,7 +1779,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_framebuffer *target_fb;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *abo;
@@ -2004,7 +2004,7 @@ static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
 				    struct drm_display_mode *mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2018,7 +2018,7 @@ static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	u16 *r, *g, *b;
 	int i;
@@ -2140,7 +2140,7 @@ static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	u32 pll_in_use;
 	int pll;
@@ -2188,7 +2188,7 @@ static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
 
 static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 {
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	uint32_t cur_lock;
@@ -2203,7 +2203,7 @@ static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 
 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
@@ -2213,7 +2213,7 @@ static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
 static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 
 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(amdgpu_crtc->cursor_addr));
@@ -2230,7 +2230,7 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
 				       int x, int y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	int xorigin = 0, yorigin = 0;
 
 	amdgpu_crtc->cursor_x = x;
@@ -2404,7 +2404,7 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	unsigned type;
@@ -2458,7 +2458,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_atom_ss ss;
 	int i;
@@ -2609,7 +2609,7 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
 	if (amdgpu_crtc == NULL)
 		return -ENOMEM;
 
-	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
+	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
 
 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
 	amdgpu_crtc->crtc_id = index;
@@ -2617,8 +2617,8 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
 	amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
 	amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
-	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
-	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
 
 	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
@@ -2689,24 +2689,24 @@ static int dce_v8_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
 
-	adev->ddev->mode_config.async_page_flip = true;
+	adev_to_drm(adev)->mode_config.async_page_flip = true;
 
-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;
 
-	adev->ddev->mode_config.preferred_depth = 24;
-	adev->ddev->mode_config.prefer_shadow = 1;
+	adev_to_drm(adev)->mode_config.preferred_depth = 24;
+	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
-	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
 	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;
 
-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;
 
 	/* allocate crtcs */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2716,7 +2716,7 @@ static int dce_v8_0_sw_init(void *handle)
 	}
 
 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-		amdgpu_display_print_display_setup(adev->ddev);
+		amdgpu_display_print_display_setup(adev_to_drm(adev));
 	else
 		return -EINVAL;
 
@@ -2729,7 +2729,7 @@ static int dce_v8_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	drm_kms_helper_poll_init(adev->ddev);
+	drm_kms_helper_poll_init(adev_to_drm(adev));
 
 	adev->mode_info.mode_config_initialized = true;
 	return 0;
@@ -2741,13 +2741,13 @@ static int dce_v8_0_sw_fini(void *handle)
 
 	kfree(adev->mode_info.bios_hardcoded_edid);
 
-	drm_kms_helper_poll_fini(adev->ddev);
+	drm_kms_helper_poll_fini(adev_to_drm(adev));
 
 	dce_v8_0_audio_fini(adev);
 
 	dce_v8_0_afmt_fini(adev);
 
-	drm_mode_config_cleanup(adev->ddev);
+	drm_mode_config_cleanup(adev_to_drm(adev));
 	adev->mode_info.mode_config_initialized = false;
 
 	return 0;
@@ -3057,7 +3057,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
-			drm_handle_vblank(adev->ddev, crtc);
+			drm_handle_vblank(adev_to_drm(adev), crtc);
 		}
 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
 		break;
@@ -3126,14 +3126,14 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
 	if (amdgpu_crtc == NULL)
 		return 0;
 
-	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 	works = amdgpu_crtc->pflip_works;
 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
 				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
 				 amdgpu_crtc->pflip_status,
 				 AMDGPU_FLIP_SUBMITTED);
-		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 		return 0;
 	}
 
@@ -3145,7 +3145,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
 	if (works->event)
 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
-	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
 	drm_crtc_vblank_put(&amdgpu_crtc->base);
 	schedule_work(&works->unpin_work);
@@ -3233,7 +3233,7 @@ dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
 
 static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
 {
-	struct amdgpu_device *adev = encoder->dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
 
@@ -3273,7 +3273,7 @@ static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
 static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 
 	/* need to call this here as we need the crtc set up */
 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3373,7 +3373,7 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
 			       uint32_t supported_device,
 			       u16 caps)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_encoder *encoder;
 	struct amdgpu_encoder *amdgpu_encoder;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index d5ff7b6331ff98c7636cf04ad31be92e0f4b5a8c..cc93577dee03f5a9ce864a27bc4b84ba34e3a92d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -132,7 +132,7 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
 static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	unsigned type;
@@ -235,7 +235,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
 	if (amdgpu_crtc == NULL)
 		return -ENOMEM;
 
-	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
+	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
 
 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
 	amdgpu_crtc->crtc_id = index;
@@ -374,24 +374,24 @@ static int dce_virtual_sw_init(void *handle)
 	if (r)
 		return r;
 
-	adev->ddev->max_vblank_count = 0;
+	adev_to_drm(adev)->max_vblank_count = 0;
 
-	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
 
-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;
 
-	adev->ddev->mode_config.preferred_depth = 24;
-	adev->ddev->mode_config.prefer_shadow = 1;
+	adev_to_drm(adev)->mode_config.preferred_depth = 24;
+	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
 
-	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
 
 	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;
 
-	adev->ddev->mode_config.max_width = 16384;
-	adev->ddev->mode_config.max_height = 16384;
+	adev_to_drm(adev)->mode_config.max_width = 16384;
+	adev_to_drm(adev)->mode_config.max_height = 16384;
 
 	/* allocate crtcs, encoders, connectors */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -403,7 +403,7 @@ static int dce_virtual_sw_init(void *handle)
 			return r;
 	}
 
-	drm_kms_helper_poll_init(adev->ddev);
+	drm_kms_helper_poll_init(adev_to_drm(adev));
 
 	adev->mode_info.mode_config_initialized = true;
 	return 0;
@@ -415,9 +415,9 @@ static int dce_virtual_sw_fini(void *handle)
 
 	kfree(adev->mode_info.bios_hardcoded_edid);
 
-	drm_kms_helper_poll_fini(adev->ddev);
+	drm_kms_helper_poll_fini(adev_to_drm(adev));
 
-	drm_mode_config_cleanup(adev->ddev);
+	drm_mode_config_cleanup(adev_to_drm(adev));
 	/* clear crtcs pointer to avoid dce irq finish routine access freed data */
 	memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS);
 	adev->mode_info.mode_config_initialized = false;
@@ -602,7 +602,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
 	if (!encoder)
 		return -ENOMEM;
 
 	encoder->possible_crtcs = 1 << index;
-	drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+	drm_encoder_init(adev_to_drm(adev), encoder, &dce_virtual_encoder_funcs,
 			 DRM_MODE_ENCODER_VIRTUAL, NULL);
 	drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
@@ -613,7 +613,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
 	}
 
 	/* add a new connector */
-	drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+	drm_connector_init(adev_to_drm(adev), connector, &dce_virtual_connector_funcs,
 			   DRM_MODE_CONNECTOR_VIRTUAL);
 	drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -663,14 +663,14 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
 	if (amdgpu_crtc == NULL)
 		return 0;
 
-	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 	works = amdgpu_crtc->pflip_works;
 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
 				 "AMDGPU_FLIP_SUBMITTED(%d)\n",
 				 amdgpu_crtc->pflip_status,
 				 AMDGPU_FLIP_SUBMITTED);
-		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 		return 0;
 	}
 
@@ -682,7 +682,7 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
 	if (works->event)
 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
 
-	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
 	drm_crtc_vblank_put(&amdgpu_crtc->base);
 	amdgpu_bo_unref(&works->old_abo);
@@ -697,7 +697,7 @@ static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vbla
 	struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
 						       struct amdgpu_crtc, vblank_timer);
 	struct drm_device *ddev = amdgpu_crtc->base.dev;
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);
 
 	drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
 	dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
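The df_v3_6.c hunk that follows is the sysfs flavor of the same conversion: a device-attribute callback receives a struct device, the driver data stored on it is the drm_device, and that in turn yields the amdgpu_device. A minimal sketch of the chain, with a hypothetical attribute name standing in for the real one:

	static ssize_t example_show(struct device *dev,		/* hypothetical attribute */
				    struct device_attribute *attr, char *buf)
	{
		struct drm_device *ddev = dev_get_drvdata(dev);
		struct amdgpu_device *adev = drm_to_adev(ddev);

		return snprintf(buf, PAGE_SIZE, "%d\n", adev->asic_type);
	}

Before this series the middle step was an open-coded ddev->dev_private read, which is exactly what the hunk below replaces.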
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 1ab26183698342dab9ff42f9e9b8769c631675ed..2eab808fffeb57b582df293de234d094529842c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -251,7 +251,7 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
 	int i, count;
 
 	ddev = dev_get_drvdata(dev);
-	adev = ddev->dev_private;
+	adev = drm_to_adev(ddev);
 	count = 0;
 
 	for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
@@ -646,7 +646,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
 				  uint64_t config,
 				  uint64_t *count)
 {
-	uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
+	uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val = 0, hi_val = 0;
 	*count = 0;
 
 	switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 65997ffaed4566ced592e4d828fd13fe34006017..d502e30f67d9a41846df8107060dbb0f91b33329 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3307,6 +3307,29 @@ static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
 	adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
 }
 
+static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_NAVI10:
+		soc15_program_register_sequence(adev,
+						golden_settings_gc_rlc_spm_10_0_nv10,
+						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
+		break;
+	case CHIP_NAVI14:
+		soc15_program_register_sequence(adev,
+						golden_settings_gc_rlc_spm_10_1_nv14,
+						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
+		break;
+	case CHIP_NAVI12:
+		soc15_program_register_sequence(adev,
+						golden_settings_gc_rlc_spm_10_1_2_nv12,
+						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
+		break;
+	default:
+		break;
+	}
+}
+
 static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
@@ -3317,9 +3340,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 		soc15_program_register_sequence(adev,
 						golden_settings_gc_10_0_nv10,
 						(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
-		soc15_program_register_sequence(adev,
-						golden_settings_gc_rlc_spm_10_0_nv10,
-						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
 		break;
 	case CHIP_NAVI14:
 		soc15_program_register_sequence(adev,
@@ -3328,9 +3348,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 		soc15_program_register_sequence(adev,
 						golden_settings_gc_10_1_nv14,
 						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
-		soc15_program_register_sequence(adev,
-						golden_settings_gc_rlc_spm_10_1_nv14,
-						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
 		break;
 	case CHIP_NAVI12:
 		soc15_program_register_sequence(adev,
@@ -3339,9 +3356,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 		soc15_program_register_sequence(adev,
 						golden_settings_gc_10_1_2_nv12,
 						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
-		soc15_program_register_sequence(adev,
-						golden_settings_gc_rlc_spm_10_1_2_nv12,
-						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
 		break;
 	case CHIP_SIENNA_CICHLID:
 		soc15_program_register_sequence(adev,
@@ -3360,6 +3374,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 	default:
 		break;
 	}
+	gfx_v10_0_init_spm_golden_registers(adev);
 }
 
 static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
@@ -4022,21 +4037,23 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
 	amdgpu_gfx_compute_queue_acquire(adev);
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;
 
-	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_GTT,
-				      &adev->gfx.mec.hpd_eop_obj,
-				      &adev->gfx.mec.hpd_eop_gpu_addr,
-				      (void **)&hpd);
-	if (r) {
-		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
-		gfx_v10_0_mec_fini(adev);
-		return r;
-	}
+	if (mec_hpd_size) {
+		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_GTT,
+					      &adev->gfx.mec.hpd_eop_obj,
+					      &adev->gfx.mec.hpd_eop_gpu_addr,
+					      (void **)&hpd);
+		if (r) {
+			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+			gfx_v10_0_mec_fini(adev);
+			return r;
+		}
 
-	memset(hpd, 0, mec_hpd_size);
+		memset(hpd, 0, mec_hpd_size);
 
-	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
-	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+	}
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
 		mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
@@ -4147,6 +4164,7 @@ static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
 	.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
 	.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
 	.select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
+	.init_spm_golden = &gfx_v10_0_init_spm_golden_registers,
 };
 
 static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
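Factoring the RLC SPM entries out of gfx_v10_0_init_golden_registers() and publishing them through the new .init_spm_golden callback lets other code reprogram just those registers without replaying the full golden list. Presumably the callback is reached through a wrapper in the shared gfx header along these lines (the macro name is inferred from the funcs entry above, not confirmed here):

	/* sketch in the style of amdgpu_gfx.h; the macro name is an assumption */
	#define amdgpu_gfx_init_spm_golden(adev) \
		(adev)->gfx.funcs->init_spm_golden((adev))

	/* a caller that only needs the SPM settings restored: */
	if (adev->gfx.funcs->init_spm_golden)
		amdgpu_gfx_init_spm_golden(adev);

Note that gfx_v10_0_init_golden_registers() still calls the new function at its tail, so ordinary init behaves exactly as before the split.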
@@ -6180,7 +6198,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
 	struct v10_gfx_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.gfx_ring[0];
 
-	if (!adev->in_gpu_reset && !adev->in_suspend) {
+	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
 		memset((void *)mqd, 0, sizeof(*mqd));
 		mutex_lock(&adev->srbm_mutex);
 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -6192,7 +6210,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
 		mutex_unlock(&adev->srbm_mutex);
 		if (adev->gfx.me.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
-	} else if (adev->in_gpu_reset) {
+	} else if (amdgpu_in_reset(adev)) {
 		/* reset mqd with the backup copy */
 		if (adev->gfx.me.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
@@ -6433,6 +6451,10 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
 	int j;
 
+	/* inactivate the queue */
+	if (amdgpu_sriov_vf(adev))
+		WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
+
 	/* disable wptr polling */
 	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
@@ -6541,7 +6563,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
 
 	gfx_v10_0_kiq_setting(ring);
 
-	if (adev->in_gpu_reset) { /* for GPU_RESET case */
+	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
@@ -6577,7 +6599,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
-	if (!adev->in_gpu_reset && !adev->in_suspend) {
+	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
 		memset((void *)mqd, 0, sizeof(*mqd));
 		mutex_lock(&adev->srbm_mutex);
 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -6587,7 +6609,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
-	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+	} else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
@@ -7033,8 +7055,7 @@ static int gfx_v10_0_soft_reset(void *handle)
 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
 		   GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
-		   GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK
-		   | GRBM_STATUS__BCI_BUSY_MASK)) {
+		   GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK)) {
 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
 						GRBM_SOFT_RESET, SOFT_RESET_CP,
 						1);
@@ -7159,7 +7180,7 @@ static int gfx_v10_0_early_init(void *handle)
 		break;
 	}
 
-	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+	adev->gfx.num_compute_rings = amdgpu_num_kcq;
 
 	gfx_v10_0_set_kiq_pm4_funcs(adev);
 	gfx_v10_0_set_ring_funcs(adev);
@@ -7263,10 +7284,8 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
-			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
-
-		/* only for Vega10 & Raven1 */
-		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
+			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK |
+			  RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK);
 
 		if (def != data)
 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
@@ -7429,7 +7448,6 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 	    (AMD_CG_SUPPORT_GFX_MGCG |
 	     AMD_CG_SUPPORT_GFX_CGLS |
 	     AMD_CG_SUPPORT_GFX_CGCG |
-	     AMD_CG_SUPPORT_GFX_CGLS |
 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
 		gfx_v10_0_enable_gui_idle_interrupt(adev, enable);
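The queue-init hunks above (and the gfx_v8_0.c ones below) stop reading adev->in_gpu_reset directly and call an amdgpu_in_reset() helper instead. A minimal sketch, assuming the flag is becoming an atomic counter on the device (the concrete definition belongs to amdgpu.h):

	static inline bool amdgpu_in_reset(struct amdgpu_device *adev)
	{
		return atomic_read(&adev->in_gpu_reset) != 0;	/* assumption: atomic_t field */
	}

An accessor keeps every call site uniform and leaves room to change how the flag is stored without touching the IP blocks again.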
+		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+	}
 
 	return 0;
 }
@@ -3250,7 +3251,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
 		dev_warn(adev->dev,
 			 "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
 			 adev->asic_type);
-		/* fall through */
+		fallthrough;
 
 	case CHIP_CARRIZO:
 		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
@@ -4632,7 +4633,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 
 	gfx_v8_0_kiq_setting(ring);
 
-	if (adev->in_gpu_reset) { /* for GPU_RESET case */
+	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4669,7 +4670,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 	struct vi_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
-	if (!adev->in_gpu_reset && !adev->in_suspend) {
+	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
 		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
 		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
 		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4681,7 +4682,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
 
-	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+	} else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -5294,7 +5295,7 @@ static int gfx_v8_0_early_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
-	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+	adev->gfx.num_compute_rings = amdgpu_num_kcq;
 	adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
 	gfx_v8_0_set_ring_funcs(adev);
 	gfx_v8_0_set_irq_funcs(adev);
@@ -5342,10 +5343,9 @@ static int gfx_v8_0_late_init(void *handle)
 
 static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
 						       bool enable)
 {
-	if (((adev->asic_type == CHIP_POLARIS11) ||
+	if ((adev->asic_type == CHIP_POLARIS11) ||
 	    (adev->asic_type == CHIP_POLARIS12) ||
-	    (adev->asic_type == CHIP_VEGAM)) &&
-	    adev->powerplay.pp_funcs->set_powergating_by_smu)
+	    (adev->asic_type == CHIP_VEGAM))
 		/* Send msg to SMU via Powerplay */
 		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
 
@@ -5879,8 +5879,7 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
 				PP_BLOCK_GFX_CG,
 				pp_support_state,
 				pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}
 
 	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5901,8 +5900,7 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
 				PP_BLOCK_GFX_MG,
 				pp_support_state,
 				pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}
 
 	return 0;
@@ -5931,8 +5929,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
 				PP_BLOCK_GFX_CG,
 				pp_support_state,
 				pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}
 
 	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
@@ -5951,8 +5948,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
 				PP_BLOCK_GFX_3D,
 				pp_support_state,
 				pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}
 
 	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5973,8 +5969,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
 				PP_BLOCK_GFX_MG,
 				pp_support_state,
 				pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}
 
 	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
@@ -5989,8 +5984,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
 				PP_BLOCK_GFX_RLC,
 				pp_support_state,
 				pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}
 
 	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
@@ -6004,8 +5998,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
 				PP_BLOCK_GFX_CP,
 				pp_support_state,
 				pp_state);
-		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index cb9d60a4e05ed50004c6e731050a450ca98ba5a0..93c63ff3b35ee59d6a5175688b4daf61ce47eef0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -691,6 +691,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
 };
 
 static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
@@ -1938,22 +1939,23 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
 	/* take ownership of the relevant compute queues */
 	amdgpu_gfx_compute_queue_acquire(adev);
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
+	if (mec_hpd_size) {
+		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      &adev->gfx.mec.hpd_eop_obj,
+					      &adev->gfx.mec.hpd_eop_gpu_addr,
+					      (void **)&hpd);
+		if (r) {
+			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+			gfx_v9_0_mec_fini(adev);
+			return r;
+		}
 
-	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
-				      &adev->gfx.mec.hpd_eop_obj,
-				      &adev->gfx.mec.hpd_eop_gpu_addr,
-				      (void **)&hpd);
-	if (r) {
-		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
-		gfx_v9_0_mec_fini(adev);
-		return r;
-	}
-
-	memset(hpd, 0, mec_hpd_size);
+		memset(hpd, 0, mec_hpd_size);
 
-	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
-	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+	}
 
 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
 
@@ -3684,7 +3686,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
 
 	gfx_v9_0_kiq_setting(ring);
 
-	if (adev->in_gpu_reset) { /* for GPU_RESET case */
+	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@@ -3722,7 +3724,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
 	struct v9_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
-	if (!adev->in_gpu_reset && !adev->in_suspend) {
+	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -3734,7 +3736,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
 
-	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+	} else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@@ -3928,7 +3930,7 @@ static int gfx_v9_0_hw_fini(void *handle)
 	/* Use deinitialize sequence from CAIL when unbinding device from driver,
 	 * otherwise KIQ is hanging when binding back
 	 */
-	if (!adev->in_gpu_reset && !adev->in_suspend) {
+	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
 		mutex_lock(&adev->srbm_mutex);
 		soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
 				adev->gfx.kiq.ring.pipe,
@@ -4086,7 +4088,7 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
 	 *
 	 * also don't wait anymore for IRQ context
	 * */
-	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
 		goto failed_kiq_read;
 
 	might_sleep();
@@ -4625,7 +4627,7 @@ static int gfx_v9_0_early_init(void *handle)
 		adev->gfx.num_gfx_rings = 0;
 	else
 		adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
-	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+	adev->gfx.num_compute_rings = amdgpu_num_kcq;
 	gfx_v9_0_set_kiq_pm4_funcs(adev);
 	gfx_v9_0_set_ring_funcs(adev);
 	gfx_v9_0_set_irq_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
old mode 100755
new mode 100644
index 46351db36922649ed75ae635feb96efd4339faac..bd85aed3523a2526c496e450fedf2bf7c8d90c2c
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -57,10 +57,10 @@ static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = {
 	/* SPI */
 	{ SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 },
 	/* SQ */
-	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 },
-	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 },
-	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 },
-	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 8, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 8, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 8, 16 },
+	{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 8, 16 },
 	/* SQC */
 	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 },
 	{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 },
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 394e6f56948a5ee3ba04d57f052e047a348fa551..b882ac59879a0bfe5ebae0e0893a4c2421abfab7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -31,6 +31,77 @@
 
 #include "soc15_common.h"
 
+static const char *gfxhub_client_ids[] = {
+	"CB/DB",
+	"Reserved",
+	"GE1",
+	"GE2",
+	"CPF",
+	"CPC",
+	"CPG",
+	"RLC",
+	"TCP",
+	"SQC (inst)",
+	"SQC (data)",
+	"SQG",
+	"Reserved",
+	"SDMA0",
+	"SDMA1",
+	"GCR",
+	"SDMA2",
+	"SDMA3",
+};
+
+static uint32_t gfxhub_v2_0_get_invalidate_req(unsigned int vmid,
+					       uint32_t flush_type)
+{
+	u32 req = 0;
+
+	/* invalidate using legacy mode on vmid*/
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+	return req;
+}
+
+static void
+gfxhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
+					     uint32_t status)
+{
+	u32 cid = REG_GET_FIELD(status,
+				GCVM_L2_PROTECTION_FAULT_STATUS, CID);
+
+	dev_err(adev->dev,
+		"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+		status);
+	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+		cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
+		cid);
+	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+		REG_GET_FIELD(status,
+		GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+		REG_GET_FIELD(status,
+		GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+		REG_GET_FIELD(status,
+		GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+		REG_GET_FIELD(status,
+		GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+	dev_err(adev->dev, "\t RW: 0x%lx\n",
+		REG_GET_FIELD(status,
+		GCVM_L2_PROTECTION_FAULT_STATUS, RW));
+}
+
 u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
 {
 	u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
@@ -360,6 +431,11 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
 	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }
 
+static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = {
+	.print_l2_protection_fault_status = gfxhub_v2_0_print_l2_protection_fault_status,
+	.get_invalidate_req = gfxhub_v2_0_get_invalidate_req,
+};
+
 void gfxhub_v2_0_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -390,4 +466,14 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev)
 		mmGCVM_INVALIDATE_ENG0_REQ;
 	hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
 		mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+	hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
+	hub->vmhub_funcs = &gfxhub_v2_0_vmhub_funcs;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index fa0bca3e1f730201574435440be503d335dbd51b..237a9ff5afa0b23f745279d38c5eeed62c2042fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -31,6 +31,77 @@
 
 #include "soc15_common.h"
 
+static const char *gfxhub_client_ids[] = {
+	"CB/DB",
+	"Reserved",
+	"GE1",
+	"GE2",
+	"CPF",
+	"CPC",
+	"CPG",
+	"RLC",
+	"TCP",
+	"SQC (inst)",
+	"SQC (data)",
+	"SQG",
+	"Reserved",
+	"SDMA0",
+	"SDMA1",
+	"GCR",
+	"SDMA2",
+	"SDMA3",
+};
+
+static uint32_t gfxhub_v2_1_get_invalidate_req(unsigned int vmid,
+					       uint32_t flush_type)
+{
+	u32 req = 0;
+
+	/* invalidate using legacy mode on vmid*/
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+	return req;
+}
+
+static void
+gfxhub_v2_1_print_l2_protection_fault_status(struct amdgpu_device *adev,
+					     uint32_t status)
+{
+	u32 cid = REG_GET_FIELD(status,
+				GCVM_L2_PROTECTION_FAULT_STATUS, CID);
+
+	dev_err(adev->dev,
+		"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+		status);
+	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+		cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
+		cid);
+	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+		REG_GET_FIELD(status,
+		GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+		REG_GET_FIELD(status,
+		GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+		REG_GET_FIELD(status,
+		GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+		REG_GET_FIELD(status,
+		GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+	dev_err(adev->dev, "\t RW: 0x%lx\n",
+		REG_GET_FIELD(status,
+		GCVM_L2_PROTECTION_FAULT_STATUS, RW));
+}
+
 u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
 {
 	u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
@@ -135,6 +206,12 @@ static void gfxhub_v2_1_init_cache_regs(struct amdgpu_device *adev)
 {
 	uint32_t tmp;
 
+	/* These registers are not accessible to VF-SRIOV.
+	 * The PF will program them instead.
+	 */
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	/* Setup L2 cache */
 	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
 	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -190,6 +267,12 @@ static void gfxhub_v2_1_enable_system_domain(struct amdgpu_device *adev)
 
 static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev)
 {
+	/* These registers are not accessible to VF-SRIOV.
+	 * The PF will program them instead.
+	 */
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
 		     0xFFFFFFFF);
 	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
@@ -326,6 +409,13 @@ void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
 					  bool value)
 {
 	u32 tmp;
+
+	/* These registers are not accessible to VF-SRIOV.
+	 * The PF will program them instead.
+	 */
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
 	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
 			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
@@ -359,6 +449,11 @@ void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
 	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }
 
+static const struct amdgpu_vmhub_funcs gfxhub_v2_1_vmhub_funcs = {
+	.print_l2_protection_fault_status = gfxhub_v2_1_print_l2_protection_fault_status,
+	.get_invalidate_req = gfxhub_v2_1_get_invalidate_req,
+};
+
 void gfxhub_v2_1_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -389,6 +484,16 @@ void gfxhub_v2_1_init(struct amdgpu_device *adev)
 		mmGCVM_INVALIDATE_ENG0_REQ;
 	hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
 		mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+	hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+		GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
+	hub->vmhub_funcs = &gfxhub_v2_1_vmhub_funcs;
 }
 
 int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index ec90c62078d96ec21b4a895e5f025ea27f8b1868..31359e519d69d8889395dda8e8745cc69d6cbf31 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -25,11 +25,10 @@
 #include "amdgpu.h"
 #include "amdgpu_atomfirmware.h"
 #include "gmc_v10_0.h"
+#include "umc_v8_7.h"
 
 #include "hdp/hdp_5_0_0_offset.h"
 #include "hdp/hdp_5_0_0_sh_mask.h"
-#include "gc/gc_10_1_0_sh_mask.h"
-#include "mmhub/mmhub_2_0_0_sh_mask.h"
 #include "athub/athub_2_0_0_sh_mask.h"
 #include "athub/athub_2_0_0_offset.h"
 #include "dcn/dcn_2_0_0_offset.h"
@@ -57,68 +56,31 @@ static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
 };
 #endif
 
+static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
+					 struct amdgpu_irq_src *src,
+					 unsigned type,
+					 enum amdgpu_interrupt_state state)
+{
+	return 0;
+}
+
 static int gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 					      struct amdgpu_irq_src *src,
 					      unsigned type,
 					      enum amdgpu_interrupt_state state)
 {
-	struct amdgpu_vmhub *hub;
-	u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;
-
-	bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
-
-	bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
-
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
 		/* MM HUB */
-		hub = &adev->vmhub[AMDGPU_MMHUB_0];
-		for (i = 0; i < 16; i++) {
-			reg = hub->vm_context0_cntl + hub->ctx_distance * i;
-			tmp = RREG32(reg);
-			tmp &= ~bits[AMDGPU_MMHUB_0];
-			WREG32(reg, tmp);
-		}
-
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
 		/* GFX HUB */
-		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
-		for (i = 0; i < 16; i++) {
-			reg = hub->vm_context0_cntl + hub->ctx_distance * i;
-			tmp = RREG32(reg);
-			tmp &= ~bits[AMDGPU_GFXHUB_0];
-			WREG32(reg, tmp);
-		}
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
 		/* MM HUB */
-		hub = &adev->vmhub[AMDGPU_MMHUB_0];
-		for (i = 0; i < 16; i++) {
-			reg = hub->vm_context0_cntl + hub->ctx_distance * i;
-			tmp = RREG32(reg);
-			tmp |= bits[AMDGPU_MMHUB_0];
-			WREG32(reg, tmp);
-		}
-
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
 		/* GFX HUB */
-		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
-		for (i = 0; i < 16; i++) {
-			reg = hub->vm_context0_cntl + hub->ctx_distance * i;
-			tmp = RREG32(reg);
-			tmp |= bits[AMDGPU_GFXHUB_0];
-			WREG32(reg, tmp);
-		}
+		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
 		break;
 	default:
 		break;
@@ -166,29 +128,8 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
 			task_info.task_name, task_info.pid);
 		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
 			addr, entry->client_id);
-		if (!amdgpu_sriov_vf(adev)) {
-			dev_err(adev->dev,
-				"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
-				status);
-			dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
-				REG_GET_FIELD(status,
-				GCVM_L2_PROTECTION_FAULT_STATUS, CID));
-			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
-				REG_GET_FIELD(status,
-				GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
-			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
-				REG_GET_FIELD(status,
-				GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
-			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
-				REG_GET_FIELD(status,
-				GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
-			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
-				REG_GET_FIELD(status,
-				GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
-			dev_err(adev->dev, "\t RW: 0x%lx\n",
-				REG_GET_FIELD(status,
-				GCVM_L2_PROTECTION_FAULT_STATUS, RW));
-		}
+		if (!amdgpu_sriov_vf(adev))
+			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
 	}
 
 	return 0;
@@ -199,30 +140,20 @@ static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
 	.process = gmc_v10_0_process_interrupt,
 };
 
-static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
+static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
+	.set = gmc_v10_0_ecc_interrupt_state,
+	.process = amdgpu_umc_process_ecc_irq,
+};
+
+static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
 {
 	adev->gmc.vm_fault.num_types = 1;
 	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
-}
 
-static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
-					     uint32_t flush_type)
-{
-	u32 req = 0;
-
-	/* invalidate using legacy mode on vmid*/
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
-			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
-	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
-			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
-
-	return req;
+	if (!amdgpu_sriov_vf(adev)) {
+		adev->gmc.ecc_irq.num_types = 1;
+		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
+	}
 }
 
 /**
@@ -265,7 +196,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 {
 	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
 	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
-	u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
 	u32 tmp;
 	/* Use register 17 for GART */
 	const unsigned eng = 17;
@@ -356,16 +287,17 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 */
 	if (adev->gfx.kiq.ring.sched.ready &&
 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
-	    !adev->in_gpu_reset) {
-
+	    down_read_trylock(&adev->reset_sem)) {
 		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
 		const unsigned eng = 17;
-		u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
 		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
 		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 
 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
 				1 << vmid);
+
+		up_read(&adev->reset_sem);
 		return;
 	}
 
@@ -381,7 +313,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 
 	if (!adev->mman.buffer_funcs_enabled ||
 	    !adev->ib_pool_ready ||
-	    adev->in_gpu_reset ||
+	    amdgpu_in_reset(adev) ||
 	    ring->sched.ready == false) {
 		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
 		mutex_unlock(&adev->mman.gtt_window_lock);
@@ -459,7 +391,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		spin_unlock(&adev->gfx.kiq.ring_lock);
 		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
 		if (r < 1) {
-			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
 			return -ETIME;
 		}
 
@@ -491,7 +423,7 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 {
 	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
+	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
 	unsigned eng = ring->vm_inv_eng;
 
 	/*
@@ -641,6 +573,28 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
 	}
 }
 
+static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
+	unsigned size;
+
+	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+		size = AMDGPU_VBIOS_VGA_ALLOCATION;
+	} else {
+		u32 viewport;
+		u32 pitch;
+
+		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
+		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
+		size = (REG_GET_FIELD(viewport,
+				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
+			4);
+	}
+
+	return size;
+}
+
 static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
 	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
@@ -648,7 +602,8 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
 	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
 	.map_mtype = gmc_v10_0_map_mtype,
 	.get_vm_pde = gmc_v10_0_get_vm_pde,
-	.get_vm_pte = gmc_v10_0_get_vm_pte
+	.get_vm_pte = gmc_v10_0_get_vm_pte,
+	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
 };
 
 static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -657,12 +612,36 @@ static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
 		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
 }
 
+static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_SIENNA_CICHLID:
+		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
+		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
+		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
+		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
+		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
+		adev->umc.funcs = &umc_v8_7_funcs;
+		break;
+	default:
+		break;
+	}
+}
+
+
+static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
+{
+	adev->mmhub.funcs = &mmhub_v2_0_funcs;
+}
+
 static int gmc_v10_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	gmc_v10_0_set_mmhub_funcs(adev);
 	gmc_v10_0_set_gmc_funcs(adev);
 	gmc_v10_0_set_irq_funcs(adev);
+	gmc_v10_0_set_umc_funcs(adev);
 
 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
 	adev->gmc.shared_aperture_end =
@@ -685,6 +664,10 @@ static int gmc_v10_0_late_init(void *handle)
 	if (r)
 		return r;
 
+	r = amdgpu_gmc_ras_late_init(adev);
+	if (r)
+		return r;
+
 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 }
 
@@ -789,36 +772,6 @@ static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
 	return amdgpu_gart_table_vram_alloc(adev);
 }
 
-static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
-{
-	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
-	unsigned size;
-
-	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
-		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
-	} else {
-		u32 viewport;
-		u32 pitch;
-
-		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
-		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
-		size = (REG_GET_FIELD(viewport,
-				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
-			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
-			4);
-	}
-	/* return 0 if the pre-OS buffer uses up most of vram */
-	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
-		DRM_ERROR("Warning: pre-OS buffer uses most of vram, \
-				be aware of gart table overwrite\n");
-		return 0;
-	}
-
-	return size;
-}
-
-
-
 static int gmc_v10_0_sw_init(void *handle)
 {
 	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
@@ -830,7 +783,7 @@ static int gmc_v10_0_sw_init(void *handle)
 	else
 		gfxhub_v2_0_init(adev);
 
-	mmhub_v2_0_init(adev);
+	adev->mmhub.funcs->init(adev);
 
 	spin_lock_init(&adev->gmc.invalidate_lock);
 
@@ -878,6 +831,14 @@ static int gmc_v10_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	if (!amdgpu_sriov_vf(adev)) {
+		/* interrupt sent to DF. */
+		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
+				      &adev->gmc.ecc_irq);
+		if (r)
+			return r;
+	}
+
 	/*
 	 * Set the internal MC address mask This is the max address of the GPU's
 	 * internal address space.
	 */
@@ -900,7 +861,7 @@ static int gmc_v10_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);
+	amdgpu_gmc_get_vbios_allocations(adev);
 
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
@@ -991,7 +952,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-	r = mmhub_v2_0_gart_enable(adev);
+	r = adev->mmhub.funcs->gart_enable(adev);
 	if (r)
 		return r;
 
@@ -1013,7 +974,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 		gfxhub_v2_1_set_fault_enable_default(adev, value);
 	else
 		gfxhub_v2_0_set_fault_enable_default(adev, value);
-	mmhub_v2_0_set_fault_enable_default(adev, value);
+	adev->mmhub.funcs->set_fault_enable_default(adev, value);
 	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
 	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
 
@@ -1038,6 +999,9 @@ static int gmc_v10_0_hw_init(void *handle)
 	if (r)
 		return r;
 
+	if (adev->umc.funcs && adev->umc.funcs->init_registers)
+		adev->umc.funcs->init_registers(adev);
+
 	return 0;
 }
 
@@ -1055,7 +1019,7 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
 		gfxhub_v2_1_gart_disable(adev);
 	else
 		gfxhub_v2_0_gart_disable(adev);
-	mmhub_v2_0_gart_disable(adev);
+	adev->mmhub.funcs->gart_disable(adev);
 	amdgpu_gart_table_vram_unpin(adev);
 }
 
@@ -1069,6 +1033,7 @@ static int gmc_v10_0_hw_fini(void *handle)
 		return 0;
 	}
 
+	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 	gmc_v10_0_gart_disable(adev);
 
@@ -1121,7 +1086,7 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = mmhub_v2_0_set_clockgating(adev, state);
+	r = adev->mmhub.funcs->set_clockgating(adev, state);
 	if (r)
 		return r;
 
@@ -1136,7 +1101,7 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	mmhub_v2_0_get_clockgating(adev, flags);
+	adev->mmhub.funcs->get_clockgating(adev, flags);
 
 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
 	    adev->asic_type == CHIP_NAVY_FLOUNDER)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 538e7ee35cdf2d7c7f374e04e23213fb4e1ae6c2..95a9117e95640e5a31cb569d678cff8f1716e4a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -805,16 +805,13 @@ static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
 	unsigned size;
 
 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
-		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+		size = AMDGPU_VBIOS_VGA_ALLOCATION;
 	} else {
 		u32 viewport = RREG32(mmVIEWPORT_SIZE);
 		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
 			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
 			4);
 	}
-	/* return 0 if the pre-OS buffer uses up most of vram */
-	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
-		return 0;
 	return size;
 }
 
@@ -862,7 +859,7 @@ static int gmc_v6_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);
+	amdgpu_gmc_get_vbios_allocations(adev);
 
 	r = amdgpu_bo_init(adev);
 	if (r)
@@ -1136,6 +1133,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
 	.set_prt = gmc_v6_0_set_prt,
 	.get_vm_pde = gmc_v6_0_get_vm_pde,
 	.get_vm_pte = gmc_v6_0_get_vm_pte,
+	.get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index e18296dc13861d0eaee699bb0da6652948456d27..80c146df338aaea3fc994a8f8f30f7c1519cffef 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -434,7 +434,7 @@ static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	int vmid;
 	unsigned int tmp;
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EIO;
 
 	for (vmid = 1; vmid < 16; vmid++) {
@@ -970,16 +970,14 @@ static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
 	unsigned size;
 
 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
-		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+		size = AMDGPU_VBIOS_VGA_ALLOCATION;
 	} else {
 		u32 viewport = RREG32(mmVIEWPORT_SIZE);
 		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
 			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
 			4);
 	}
-	/* return 0 if the pre-OS buffer uses up most of vram */
-	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
-		return 0;
+
 	return size;
 }
 
@@ -1035,7 +1033,7 @@ static int gmc_v7_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev);
+	amdgpu_gmc_get_vbios_allocations(adev);
 
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
@@ -1372,7 +1370,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
 	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
 	.set_prt = gmc_v7_0_set_prt,
 	.get_vm_pde = gmc_v7_0_get_vm_pde,
-	.get_vm_pte = gmc_v7_0_get_vm_pte
+	.get_vm_pte = gmc_v7_0_get_vm_pte,
+	.get_vbios_fb_size = gmc_v7_0_get_vbios_fb_size,
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index a9e722b8a45833b83dfa95d7dd33d3fb24f35018..9ab65ca7df777f02c519b3c2d3b3a9cee45eac3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -635,7 +635,7 @@ static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	int vmid;
 	unsigned int tmp;
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EIO;
 
 	for (vmid = 1; vmid < 16; vmid++) {
@@ -1087,16 +1087,14 @@ static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
 	unsigned size;
 
 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
-		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+		size = AMDGPU_VBIOS_VGA_ALLOCATION;
 	} else {
 		u32 viewport = RREG32(mmVIEWPORT_SIZE);
 		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
 			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
 			4);
 	}
-	/* return 0 if the pre-OS buffer uses up most of vram */
-	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
-		return 0;
+
 	return size;
 }
 
@@ -1160,7 +1158,7 @@ static int gmc_v8_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);
+	amdgpu_gmc_get_vbios_allocations(adev);
 
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
@@ -1739,7 +1737,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
 	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
 	.set_prt = gmc_v8_0_set_prt,
 	.get_vm_pde = gmc_v8_0_get_vm_pde,
-	.get_vm_pte = gmc_v8_0_get_vm_pte
+	.get_vm_pte = gmc_v8_0_get_vm_pte,
+	.get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
 };
 
 static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 6e4f3ff4810f1475c9e8860c479ac53c43a26451..91629c2b1d5c2d9ecab222f9f34ef7eee5813649 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -67,6 +67,222 @@
 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
+
+
+static const char *gfxhub_client_ids[] = {
+	"CB",
+	"DB",
+	"IA",
+	"WD",
+	"CPF",
+	"CPC",
+	"CPG",
+	"RLC",
+	"TCP",
+	"SQC (inst)",
+	"SQC (data)",
+	"SQG",
+	"PA",
+};
+
+static const char *mmhub_client_ids_raven[][2] = {
+	[0][0] = "MP1",
+	[1][0] = "MP0",
+	[2][0] = "VCN",
+	[3][0] = "VCNU",
+	[4][0] = "HDP",
+	[5][0] = "DCE",
+	[13][0] = "UTCL2",
+	[19][0] = "TLS",
+	[26][0] = "OSS",
+	[27][0] = "SDMA0",
+	[0][1] = "MP1",
+	[1][1] = "MP0",
+	[2][1] = "VCN",
+	[3][1] = "VCNU",
+	[4][1] = "HDP",
+	[5][1] = "XDP",
+	[6][1] = "DBGU0",
+	[7][1] = "DCE",
+	[8][1] = "DCEDWB0",
+	[9][1] = "DCEDWB1",
+	[26][1] = "OSS",
+	[27][1] = "SDMA0",
+};
+
+static const char *mmhub_client_ids_renoir[][2] = {
+	[0][0] = "MP1",
+	[1][0] = "MP0",
+	[2][0] = "HDP",
+	[4][0] = "DCEDMC",
+	[5][0] = "DCEVGA",
+	[13][0] = "UTCL2",
+	[19][0] = "TLS",
+	[26][0] = "OSS",
+	[27][0] = "SDMA0",
+	[28][0] = "VCN",
+	[29][0] = "VCNU",
+	[30][0] = "JPEG",
+	[0][1] = "MP1",
+	[1][1] = "MP0",
+	[2][1] = "HDP",
+	[3][1] = "XDP",
+	[6][1] = "DBGU0",
+	[7][1] = "DCEDMC",
+	[8][1] = "DCEVGA",
+	[9][1] = "DCEDWB",
+	[26][1] = "OSS",
+	[27][1] = "SDMA0",
+	[28][1] = "VCN",
+	[29][1] = "VCNU",
+	[30][1] = "JPEG",
+};
+
+static const char *mmhub_client_ids_vega10[][2] = {
+	[0][0] = "MP0",
+	[1][0] = "UVD",
+	[2][0] = "UVDU",
+	[3][0] = "HDP",
+	[13][0] = "UTCL2",
+	[14][0] = "OSS",
+	[15][0] = "SDMA1",
+	[32+0][0] = "VCE0",
+	[32+1][0] = "VCE0U",
+	[32+2][0] = "XDMA",
+	[32+3][0] = "DCE",
+	[32+4][0] = "MP1",
+	[32+14][0] = "SDMA0",
+	[0][1] = "MP0",
+	[1][1] = "UVD",
+	[2][1] = "UVDU",
+	[3][1] = "DBGU0",
+	[4][1] = "HDP",
+	[5][1] = "XDP",
+	[14][1] = "OSS",
+	[15][1] = "SDMA0",
+	[32+0][1] = "VCE0",
+	[32+1][1] = "VCE0U",
+	[32+2][1] = "XDMA",
+	[32+3][1] = "DCE",
+	[32+4][1] = "DCEDWB",
+	[32+5][1] = "MP1",
+	[32+6][1] = "DBGU1",
+	[32+14][1] = "SDMA1",
+};
+
+static const char *mmhub_client_ids_vega12[][2] = {
+	[0][0] = "MP0",
+	[1][0] = "VCE0",
+	[2][0] = "VCE0U",
+	[3][0] = "HDP",
+	[13][0] = "UTCL2",
+	[14][0] = "OSS",
+	[15][0] = "SDMA1",
+	[32+0][0] = "DCE",
+	[32+1][0] = "XDMA",
+	[32+2][0] = "UVD",
+	[32+3][0] = "UVDU",
+	[32+4][0] = "MP1",
+	[32+15][0] = "SDMA0",
+	[0][1] = "MP0",
+	[1][1] = "VCE0",
+	[2][1] = "VCE0U",
+	[3][1] = "DBGU0",
+	[4][1] = "HDP",
+	[5][1] = "XDP",
+	[14][1] = "OSS",
+	[15][1] = "SDMA0",
+	[32+0][1] = "DCE",
+	[32+1][1] = "DCEDWB",
+	[32+2][1] = "XDMA",
+	[32+3][1] = "UVD",
+	[32+4][1] = "UVDU",
+	[32+5][1] = "MP1",
+	[32+6][1] = "DBGU1",
+	[32+15][1] = "SDMA1",
+};
+
+static const char *mmhub_client_ids_vega20[][2] = {
+	[0][0] = "XDMA",
+	[1][0] = "DCE",
+	[2][0] = "VCE0",
+	[3][0] = "VCE0U",
+	[4][0] = "UVD",
+	[5][0] = "UVD1U",
+	[13][0] = "OSS",
+	[14][0] = "HDP",
+	[15][0] = "SDMA0",
+	[32+0][0] = "UVD",
+	[32+1][0] = "UVDU",
+	[32+2][0] = "MP1",
+	[32+3][0] = "MP0",
"MP0", + [32+12][0] = "UTCL2", + [32+14][0] = "SDMA1", + [0][1] = "XDMA", + [1][1] = "DCE", + [2][1] = "DCEDWB", + [3][1] = "VCE0", + [4][1] = "VCE0U", + [5][1] = "UVD1", + [6][1] = "UVD1U", + [7][1] = "DBGU0", + [8][1] = "XDP", + [13][1] = "OSS", + [14][1] = "HDP", + [15][1] = "SDMA0", + [32+0][1] = "UVD", + [32+1][1] = "UVDU", + [32+2][1] = "DBGU1", + [32+3][1] = "MP1", + [32+4][1] = "MP0", + [32+14][1] = "SDMA1", +}; + +static const char *mmhub_client_ids_arcturus[][2] = { + [2][0] = "MP1", + [3][0] = "MP0", + [10][0] = "UTCL2", + [13][0] = "OSS", + [14][0] = "HDP", + [15][0] = "SDMA0", + [32+15][0] = "SDMA1", + [64+15][0] = "SDMA2", + [96+15][0] = "SDMA3", + [128+15][0] = "SDMA4", + [160+11][0] = "JPEG", + [160+12][0] = "VCN", + [160+13][0] = "VCNU", + [160+15][0] = "SDMA5", + [192+10][0] = "UTCL2", + [192+11][0] = "JPEG1", + [192+12][0] = "VCN1", + [192+13][0] = "VCN1U", + [192+15][0] = "SDMA6", + [224+15][0] = "SDMA7", + [0][1] = "DBGU1", + [1][1] = "XDP", + [2][1] = "MP1", + [3][1] = "MP0", + [13][1] = "OSS", + [14][1] = "HDP", + [15][1] = "SDMA0", + [32+15][1] = "SDMA1", + [32+15][1] = "SDMA1", + [64+15][1] = "SDMA2", + [96+15][1] = "SDMA3", + [128+15][1] = "SDMA4", + [160+11][1] = "JPEG", + [160+12][1] = "VCN", + [160+13][1] = "VCNU", + [160+15][1] = "SDMA5", + [192+11][1] = "JPEG1", + [192+12][1] = "VCN1", + [192+13][1] = "VCN1U", + [192+15][1] = "SDMA6", + [224+15][1] = "SDMA7", +}; static const u32 golden_settings_vega10_hdp[] = { @@ -300,9 +516,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, { struct amdgpu_vmhub *hub; bool retry_fault = !!(entry->src_data[1] & 0x80); - uint32_t status = 0; + uint32_t status = 0, cid = 0, rw = 0; u64 addr; char hub_name[10]; + const char *mmhub_cid; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; @@ -337,6 +554,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, RREG32(hub->vm_l2_pro_fault_status); status = RREG32(hub->vm_l2_pro_fault_status); + cid = REG_GET_FIELD(status, + VM_L2_PROTECTION_FAULT_STATUS, CID); + rw = REG_GET_FIELD(status, + VM_L2_PROTECTION_FAULT_STATUS, RW); WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); } @@ -359,9 +580,37 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); - dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n", - REG_GET_FIELD(status, - VM_L2_PROTECTION_FAULT_STATUS, CID)); + if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) { + dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", + cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid], + cid); + } else { + switch (adev->asic_type) { + case CHIP_VEGA10: + mmhub_cid = mmhub_client_ids_vega10[cid][rw]; + break; + case CHIP_VEGA12: + mmhub_cid = mmhub_client_ids_vega12[cid][rw]; + break; + case CHIP_VEGA20: + mmhub_cid = mmhub_client_ids_vega20[cid][rw]; + break; + case CHIP_ARCTURUS: + mmhub_cid = mmhub_client_ids_arcturus[cid][rw]; + break; + case CHIP_RAVEN: + mmhub_cid = mmhub_client_ids_raven[cid][rw]; + break; + case CHIP_RENOIR: + mmhub_cid = mmhub_client_ids_renoir[cid][rw]; + break; + default: + mmhub_cid = NULL; + break; + } + dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", + mmhub_cid ? 
mmhub_cid : "unknown", cid); + } dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); @@ -374,10 +623,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); - dev_err(adev->dev, "\t RW: 0x%lx\n", - REG_GET_FIELD(status, - VM_L2_PROTECTION_FAULT_STATUS, RW)); - + dev_err(adev->dev, "\t RW: 0x%x\n", rw); } } @@ -500,13 +746,14 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * as GFXOFF under bare metal */ if (adev->gfx.kiq.ring.sched.ready && - (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && - !adev->in_gpu_reset) { + (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && + down_read_trylock(&adev->reset_sem)) { uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng; uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, 1 << vmid); + up_read(&adev->reset_sem); return; } @@ -596,10 +843,10 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, struct amdgpu_ring *ring = &adev->gfx.kiq.ring; struct amdgpu_kiq *kiq = &adev->gfx.kiq; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EIO; - if (ring->sched.ready) { + if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) { /* Vega20+XGMI caches PTEs in TC and TLB. Add a * heavy-weight TLB flush (type 2), which flushes * both. Due to a race condition with concurrent @@ -626,6 +873,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (r) { amdgpu_ring_undo(ring); spin_unlock(&adev->gfx.kiq.ring_lock); + up_read(&adev->reset_sem); return -ETIME; } @@ -633,10 +881,11 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, spin_unlock(&adev->gfx.kiq.ring_lock); r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); if (r < 1) { - DRM_ERROR("wait for kiq fence error: %ld.\n", r); + dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); + up_read(&adev->reset_sem); return -ETIME; } - + up_read(&adev->reset_sem); return 0; } @@ -826,6 +1075,41 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, *flags |= AMDGPU_PTE_SNOOPED; } +static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) +{ + u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); + unsigned size; + + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { + size = AMDGPU_VBIOS_VGA_ALLOCATION; + } else { + u32 viewport; + + switch (adev->asic_type) { + case CHIP_RAVEN: + case CHIP_RENOIR: + viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); + size = (REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * + REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * + 4); + break; + case CHIP_VEGA10: + case CHIP_VEGA12: + case CHIP_VEGA20: + default: + viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE); + size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) * + REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) * + 4); + break; + } + } + + return size; +} + static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid, @@ -833,7 +1117,8 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping, .map_mtype = 
 	.get_vm_pde = gmc_v9_0_get_vm_pde,
-	.get_vm_pte = gmc_v9_0_get_vm_pte
+	.get_vm_pte = gmc_v9_0_get_vm_pte,
+	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
 };
 
 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -871,13 +1156,11 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
-	case CHIP_VEGA20:
-		adev->mmhub.funcs = &mmhub_v1_0_funcs;
-		break;
 	case CHIP_ARCTURUS:
 		adev->mmhub.funcs = &mmhub_v9_4_funcs;
 		break;
 	default:
+		adev->mmhub.funcs = &mmhub_v1_0_funcs;
 		break;
 	}
 }
@@ -901,38 +1184,12 @@ static int gmc_v9_0_early_init(void *handle)
 	return 0;
 }
 
-static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
-{
-
-	/*
-	 * TODO:
-	 * Currently there is a bug where some memory client outside
-	 * of the driver writes to first 8M of VRAM on S3 resume,
-	 * this overrides GART which by default gets placed in first 8M and
-	 * causes VM_FAULTS once GTT is accessed.
-	 * Keep the stolen memory reservation until the while this is not solved.
-	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
-	 */
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_RAVEN:
-	case CHIP_ARCTURUS:
-	case CHIP_RENOIR:
-		return true;
-	case CHIP_VEGA12:
-	case CHIP_VEGA20:
-	default:
-		return false;
-	}
-}
-
 static int gmc_v9_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int r;
 
-	if (!gmc_v9_0_keep_stolen_memory(adev))
-		amdgpu_bo_late_init(adev);
+	amdgpu_bo_late_init(adev);
 
 	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
 	if (r)
@@ -969,10 +1226,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
 {
 	u64 base = 0;
 
-	if (adev->asic_type == CHIP_ARCTURUS)
-		base = mmhub_v9_4_get_fb_location(adev);
-	else if (!amdgpu_sriov_vf(adev))
-		base = mmhub_v1_0_get_fb_location(adev);
+	if (!amdgpu_sriov_vf(adev))
+		base = adev->mmhub.funcs->get_fb_location(adev);
 
 	/* add the xgmi offset of the physical node */
 	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
@@ -1066,50 +1321,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
 	return amdgpu_gart_table_vram_alloc(adev);
 }
 
-static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+/**
+ * gmc_v9_0_save_registers - saves regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This saves potential register values that should be
+ * restored upon resume
+ */
+static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
 {
-	u32 d1vga_control;
-	unsigned size;
-
-	/*
-	 * TODO Remove once GART corruption is resolved
-	 * Check related code in gmc_v9_0_sw_fini
-	 * */
-	if (gmc_v9_0_keep_stolen_memory(adev))
-		return 9 * 1024 * 1024;
-
-	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
-	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
-		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
-	} else {
-		u32 viewport;
-
-		switch (adev->asic_type) {
-		case CHIP_RAVEN:
-		case CHIP_RENOIR:
-			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
-			size = (REG_GET_FIELD(viewport,
-					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
-				REG_GET_FIELD(viewport,
-					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
-				4);
-			break;
-		case CHIP_VEGA10:
-		case CHIP_VEGA12:
-		case CHIP_VEGA20:
-		default:
-			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
-			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
-				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
-				4);
-			break;
-		}
-	}
-	/* return 0 if the pre-OS buffer uses up most of vram */
-	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
-		return 0;
-
-	return size;
+	if (adev->asic_type == CHIP_RAVEN)
+		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
 }
 
 static int gmc_v9_0_sw_init(void *handle)
@@ -1118,10 +1341,8 @@ static int gmc_v9_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	gfxhub_v1_0_init(adev);
-	if (adev->asic_type == CHIP_ARCTURUS)
-		mmhub_v9_4_init(adev);
-	else
-		mmhub_v1_0_init(adev);
+
+	adev->mmhub.funcs->init(adev);
 
 	spin_lock_init(&adev->gmc.invalidate_lock);
 
@@ -1242,7 +1463,7 @@ static int gmc_v9_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
+	amdgpu_gmc_get_vbios_allocations(adev);
 
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
@@ -1268,21 +1489,18 @@ static int gmc_v9_0_sw_init(void *handle)
 
 	amdgpu_vm_manager_init(adev);
 
+	gmc_v9_0_save_registers(adev);
+
 	return 0;
 }
 
 static int gmc_v9_0_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	void *stolen_vga_buf;
 
 	amdgpu_gmc_ras_fini(adev);
 	amdgpu_gem_force_release(adev);
 	amdgpu_vm_manager_fini(adev);
-
-	if (gmc_v9_0_keep_stolen_memory(adev))
-		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
-
 	amdgpu_gart_table_vram_free(adev);
 	amdgpu_bo_fini(adev);
 	amdgpu_gart_fini(adev);
@@ -1297,7 +1515,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_VEGA10:
 		if (amdgpu_sriov_vf(adev))
 			break;
-		/* fall through */
+		fallthrough;
 	case CHIP_VEGA20:
 		soc15_program_register_sequence(adev,
 						golden_settings_mmhub_1_0_0,
@@ -1326,10 +1544,10 @@
 *
 * This restores register values, saved at suspend.
 */
-static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
+void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
 {
 	if (adev->asic_type == CHIP_RAVEN)
-		WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
 }
 
 /**
@@ -1353,10 +1571,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-	if (adev->asic_type == CHIP_ARCTURUS)
-		r = mmhub_v9_4_gart_enable(adev);
-	else
-		r = mmhub_v1_0_gart_enable(adev);
+	r = adev->mmhub.funcs->gart_enable(adev);
 	if (r)
 		return r;
 
@@ -1391,11 +1606,10 @@ static int gmc_v9_0_hw_init(void *handle)
 						golden_settings_vega10_hdp,
 						ARRAY_SIZE(golden_settings_vega10_hdp));
 
+	if (adev->mmhub.funcs->update_power_gating)
+		adev->mmhub.funcs->update_power_gating(adev, true);
+
 	switch (adev->asic_type) {
-	case CHIP_RAVEN:
-		/* TODO for renoir */
-		mmhub_v1_0_update_power_gating(adev, true);
-		break;
 	case CHIP_ARCTURUS:
 		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
 		break;
@@ -1421,10 +1635,7 @@ static int gmc_v9_0_hw_init(void *handle)
 
 	if (!amdgpu_sriov_vf(adev)) {
 		gfxhub_v1_0_set_fault_enable_default(adev, value);
-		if (adev->asic_type == CHIP_ARCTURUS)
-			mmhub_v9_4_set_fault_enable_default(adev, value);
-		else
-			mmhub_v1_0_set_fault_enable_default(adev, value);
+		adev->mmhub.funcs->set_fault_enable_default(adev, value);
 	}
 	for (i = 0; i < adev->num_vmhubs; ++i)
 		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
@@ -1437,20 +1648,6 @@ static int gmc_v9_0_hw_init(void *handle)
 	return r;
 }
 
-/**
- * gmc_v9_0_save_registers - saves regs
- *
- * @adev: amdgpu_device pointer
- *
- * This saves potential register values that should be
- * restored upon resume
- */
-static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
-{
-	if (adev->asic_type == CHIP_RAVEN)
-		adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
-}
-
 /**
 * gmc_v9_0_gart_disable - gart disable
 *
@@ -1461,10 +1658,7 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
 {
 	gfxhub_v1_0_gart_disable(adev);
-	if (adev->asic_type == CHIP_ARCTURUS)
-		mmhub_v9_4_gart_disable(adev);
-	else
-		mmhub_v1_0_gart_disable(adev);
+	adev->mmhub.funcs->gart_disable(adev);
 	amdgpu_gart_table_vram_unpin(adev);
 }
 
@@ -1494,8 +1688,6 @@ static int gmc_v9_0_suspend(void *handle)
 	if (r)
 		return r;
 
-	gmc_v9_0_save_registers(adev);
-
 	return 0;
 }
 
@@ -1504,7 +1696,6 @@ static int gmc_v9_0_resume(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v9_0_restore_registers(adev);
 	r = gmc_v9_0_hw_init(adev);
 	if (r)
 		return r;
@@ -1537,10 +1728,7 @@ static int gmc_v9_0_set_clockgating_state(void *handle,
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->asic_type == CHIP_ARCTURUS)
-		mmhub_v9_4_set_clockgating(adev, state);
-	else
-		mmhub_v1_0_set_clockgating(adev, state);
+	adev->mmhub.funcs->set_clockgating(adev, state);
 
 	athub_v1_0_set_clockgating(adev, state);
 
@@ -1551,10 +1739,7 @@ static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->asic_type == CHIP_ARCTURUS)
-		mmhub_v9_4_get_clockgating(adev, flags);
-	else
-		mmhub_v1_0_get_clockgating(adev, flags);
+	adev->mmhub.funcs->get_clockgating(adev, flags);
 
 	athub_v1_0_get_clockgating(adev, flags);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
e0585e8c6c1b74fb690d07c4b16ee8dc5d453a3e..c415c439f6905093b0d35b9785b3a62ef2492523 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs gmc_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block; + +void gmc_v9_0_restore_registers(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 7a51c615d22d68a199c5ed30ef2ff1435800be11..845306f63cdb4b7f309093d639513ec0d435b325 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -55,22 +55,18 @@ static int amdgpu_ih_clientid_jpeg[] = { static int jpeg_v2_5_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_ARCTURUS) { - u32 harvest; - int i; - - adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS; - for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { - harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING); - if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) - adev->jpeg.harvest_config |= 1 << i; - } + u32 harvest; + int i; - if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 | - AMDGPU_JPEG_HARVEST_JPEG1)) - return -ENOENT; - } else - adev->jpeg.num_jpeg_inst = 1; + adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS; + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { + harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING); + if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) + adev->jpeg.harvest_config |= 1 << i; + } + if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 | + AMDGPU_JPEG_HARVEST_JPEG1)) + return -ENOENT; jpeg_v2_5_set_dec_ring_funcs(adev); jpeg_v2_5_set_irq_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index c41e5590a7019b0cc561c3c0aba3432c2f5be086..3a0dff53654df58b16e2b9a62dcaa62cf5415e45 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -460,15 +460,10 @@ static bool jpeg_v3_0_is_idle(void *handle) static int jpeg_v3_0_wait_for_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int ret; - ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, + return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK, UVD_JRBC_STATUS__RB_JOB_DONE_MASK); - if (ret) - return ret; - - return ret; } static int jpeg_v3_0_set_clockgating_state(void *handle, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c old mode 100755 new mode 100644 index dffcb93ecee52e57684e6f7ee440f6eb7afceef5..45a902b1acb77513119d19f5d050abc30ae9bffb --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -34,7 +34,7 @@ #define mmDAGB0_CNTL_MISC2_RV 0x008f #define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0 -u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) +static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) { u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE); u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP); @@ -51,7 +51,7 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) return base; } -void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, +static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; @@ -297,20 +297,19 @@ 
static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) } } -void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, +static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, bool enable) { if (amdgpu_sriov_vf(adev)) return; if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) { - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true); + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true); } } -int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) +static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) { /* @@ -338,7 +337,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) return 0; } -void mmhub_v1_0_gart_disable(struct amdgpu_device *adev) +static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; u32 tmp; @@ -373,7 +372,7 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @value: true redirects VM faults to the default page */ -void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) +static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; @@ -415,7 +414,7 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp); } -void mmhub_v1_0_init(struct amdgpu_device *adev) +static void mmhub_v1_0_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; @@ -525,7 +524,7 @@ static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *ade WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data); } -int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, +static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state) { if (amdgpu_sriov_vf(adev)) @@ -549,7 +548,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, return 0; } -void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) +static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) { int data, data1; @@ -781,4 +780,13 @@ const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = { .ras_late_init = amdgpu_mmhub_ras_late_init, .query_ras_error_count = mmhub_v1_0_query_ras_error_count, .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count, + .get_fb_location = mmhub_v1_0_get_fb_location, + .init = mmhub_v1_0_init, + .gart_enable = mmhub_v1_0_gart_enable, + .set_fault_enable_default = mmhub_v1_0_set_fault_enable_default, + .gart_disable = mmhub_v1_0_gart_disable, + .set_clockgating = mmhub_v1_0_set_clockgating, + .get_clockgating = mmhub_v1_0_get_clockgating, + .setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs, + .update_power_gating = mmhub_v1_0_update_power_gating, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h index c43319e8f945f92a301b28f6cffa33e03fa9cf63..d77f5b65a6186d5cadbe8968ecea88e41a753a60 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h @@ -25,18 +25,4 @@ extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs; -u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev); -int mmhub_v1_0_gart_enable(struct amdgpu_device *adev); -void mmhub_v1_0_gart_disable(struct amdgpu_device *adev); -void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, - bool 
value); -void mmhub_v1_0_init(struct amdgpu_device *adev); -int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, - enum amd_clockgating_state state); -void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags); -void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, - bool enable); -void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t page_table_base); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 757fa8e83f5b356ce60f441ce874257925e896dd..2d88278c50bf752b29f0c038f852b0f9f76e9532 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -36,7 +36,130 @@ #define mmDAGB0_CNTL_MISC2_Sienna_Cichlid 0x0070 #define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX 0 -void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, +static const char *mmhub_client_ids_navi1x[][2] = { + [3][0] = "DCEDMC", + [4][0] = "DCEVGA", + [5][0] = "MP0", + [6][0] = "MP1", + [13][0] = "VMC", + [14][0] = "HDP", + [15][0] = "OSS", + [16][0] = "VCNU", + [17][0] = "JPEG", + [18][0] = "VCN", + [3][1] = "DCEDMC", + [4][1] = "DCEXFC", + [5][1] = "DCEVGA", + [6][1] = "DCEDWB", + [7][1] = "MP0", + [8][1] = "MP1", + [9][1] = "DBGU1", + [10][1] = "DBGU0", + [11][1] = "XDP", + [14][1] = "HDP", + [15][1] = "OSS", + [16][1] = "VCNU", + [17][1] = "JPEG", + [18][1] = "VCN", +}; + +static const char *mmhub_client_ids_sienna_cichlid[][2] = { + [3][0] = "DCEDMC", + [4][0] = "DCEVGA", + [5][0] = "MP0", + [6][0] = "MP1", + [8][0] = "VMC", + [9][0] = "VCNU0", + [10][0] = "JPEG", + [12][0] = "VCNU1", + [13][0] = "VCN1", + [14][0] = "HDP", + [15][0] = "OSS", + [32+11][0] = "VCN0", + [0][1] = "DBGU0", + [1][1] = "DBGU1", + [2][1] = "DCEDWB", + [3][1] = "DCEDMC", + [4][1] = "DCEVGA", + [5][1] = "MP0", + [6][1] = "MP1", + [7][1] = "XDP", + [9][1] = "VCNU0", + [10][1] = "JPEG", + [11][1] = "VCN0", + [12][1] = "VCNU1", + [13][1] = "VCN1", + [14][1] = "HDP", + [15][1] = "OSS", +}; + +static uint32_t mmhub_v2_0_get_invalidate_req(unsigned int vmid, + uint32_t flush_type) +{ + u32 req = 0; + + /* invalidate using legacy mode on vmid*/ + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, + PER_VMID_INVALIDATE_REQ, 1 << vmid); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); + req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, + CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); + + return req; +} + +static void +mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev, + uint32_t status) +{ + uint32_t cid, rw; + const char *mmhub_cid = NULL; + + cid = REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, CID); + rw = REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, RW); + + dev_err(adev->dev, + "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", + status); + switch (adev->asic_type) { + case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: + mmhub_cid = mmhub_client_ids_navi1x[cid][rw]; + break; + case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: + mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw]; + break; + default: + mmhub_cid = NULL; + break; + } 
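+	/* Annotation (descriptive, not upstream): cid and rw decoded above index the per-ASIC client-ID tables, with rw selecting the read (0) or write (1) column; any ID without a table entry resolves to NULL and is reported as "unknown" below. */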
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", + mmhub_cid ? mmhub_cid : "unknown", cid); + dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); + dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); + dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); + dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); + dev_err(adev->dev, "\t RW: 0x%x\n", rw); +} + +static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; @@ -134,6 +257,12 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1); @@ -189,6 +318,12 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev) static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) { + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0xFFFFFFFF); @@ -268,7 +403,7 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev) } } -int mmhub_v2_0_gart_enable(struct amdgpu_device *adev) +static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev) { /* GART Enable. */ mmhub_v2_0_init_gart_aperture_regs(adev); @@ -284,7 +419,7 @@ int mmhub_v2_0_gart_enable(struct amdgpu_device *adev) return 0; } -void mmhub_v2_0_gart_disable(struct amdgpu_device *adev) +static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; u32 tmp; @@ -315,9 +450,16 @@ void mmhub_v2_0_gart_disable(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @value: true redirects VM faults to the default page */ -void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) +static void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; + + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. 
+ */ + if (amdgpu_sriov_vf(adev)) + return; + tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); @@ -351,7 +493,12 @@ void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp); } -void mmhub_v2_0_init(struct amdgpu_device *adev) +static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs = { + .print_l2_protection_fault_status = mmhub_v2_0_print_l2_protection_fault_status, + .get_invalidate_req = mmhub_v2_0_get_invalidate_req, +}; + +static void mmhub_v2_0_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; @@ -381,6 +528,16 @@ void mmhub_v2_0_init(struct amdgpu_device *adev) mmMMVM_INVALIDATE_ENG0_REQ; hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; + + hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; + + hub->vmhub_funcs = &mmhub_v2_0_vmhub_funcs; } static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, @@ -471,7 +628,7 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade } } -int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, +static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state) { if (amdgpu_sriov_vf(adev)) @@ -495,7 +652,7 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, return 0; } -void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) +static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) { int data, data1; @@ -528,3 +685,14 @@ void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK) *flags |= AMD_CG_SUPPORT_MC_LS; } + +const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = { + .ras_late_init = amdgpu_mmhub_ras_late_init, + .init = mmhub_v2_0_init, + .gart_enable = mmhub_v2_0_gart_enable, + .set_fault_enable_default = mmhub_v2_0_set_fault_enable_default, + .gart_disable = mmhub_v2_0_gart_disable, + .set_clockgating = mmhub_v2_0_set_clockgating, + .get_clockgating = mmhub_v2_0_get_clockgating, + .setup_vm_pt_regs = mmhub_v2_0_setup_vm_pt_regs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h index 3ea4344f0315ca92c78c6a252adc7f2aab3bbff6..f80f461d67dac733d4fdc15b224fa8ee82bc9ab4 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h @@ -23,15 +23,6 @@ #ifndef __MMHUB_V2_0_H__ #define __MMHUB_V2_0_H__ -int mmhub_v2_0_gart_enable(struct amdgpu_device *adev); -void mmhub_v2_0_gart_disable(struct amdgpu_device *adev); -void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, - bool value); -void mmhub_v2_0_init(struct amdgpu_device *adev); -int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, - enum amd_clockgating_state state); -void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags); 
-void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t page_table_base); +extern const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 9979f54fef57f99f95238f09eef3745af7141753..6c6ad529c65c65fd89811e19fd054305969ae749 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -36,7 +36,7 @@ #define MMHUB_NUM_INSTANCES 2 #define MMHUB_INSTANCE_REGISTER_OFFSET 0x3000 -u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) +static u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) { /* The base should be same b/t 2 mmhubs on Acrturus. Read one here. */ u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE); @@ -97,7 +97,7 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev, (u32)(adev->gmc.gart_end >> 44)); } -void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, +static void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { int i; @@ -375,7 +375,7 @@ static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev, } } -int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) +static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) { int i; @@ -397,7 +397,7 @@ int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) return 0; } -void mmhub_v9_4_gart_disable(struct amdgpu_device *adev) +static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; u32 tmp; @@ -442,7 +442,7 @@ void mmhub_v9_4_gart_disable(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * @value: true redirects VM faults to the default page */ -void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value) +static void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; int i; @@ -500,7 +500,7 @@ void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value) } } -void mmhub_v9_4_init(struct amdgpu_device *adev) +static void mmhub_v9_4_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] = {&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]}; @@ -630,7 +630,7 @@ static void mmhub_v9_4_update_medium_grain_light_sleep(struct amdgpu_device *ade } } -int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, +static int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state) { if (amdgpu_sriov_vf(adev)) @@ -650,7 +650,7 @@ int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, return 0; } -void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags) +static void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags) { int data, data1; @@ -1628,4 +1628,12 @@ const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = { .ras_late_init = amdgpu_mmhub_ras_late_init, .query_ras_error_count = mmhub_v9_4_query_ras_error_count, .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count, + .get_fb_location = mmhub_v9_4_get_fb_location, + .init = mmhub_v9_4_init, + .gart_enable = mmhub_v9_4_gart_enable, + .set_fault_enable_default = mmhub_v9_4_set_fault_enable_default, + .gart_disable = mmhub_v9_4_gart_disable, + .set_clockgating = mmhub_v9_4_set_clockgating, + .get_clockgating = mmhub_v9_4_get_clockgating, + .setup_vm_pt_regs = mmhub_v9_4_setup_vm_pt_regs, }; diff --git 
a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h index 1b979773776c0a02e0a5089ca12c4db51b1c2e62..92404a8f66f3c425240d1992e19bb65aac7ed299 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h @@ -25,16 +25,4 @@ extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs; -u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev); -int mmhub_v9_4_gart_enable(struct amdgpu_device *adev); -void mmhub_v9_4_gart_disable(struct amdgpu_device *adev); -void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, - bool value); -void mmhub_v9_4_init(struct amdgpu_device *adev); -int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, - enum amd_clockgating_state state); -void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags); -void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, - uint64_t page_table_base); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 5fd67e1cc2a0465ef469434d29e2585ffcd042f0..9c07014d9bd6ce4ba1e03568e349b37b297f4e52 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -238,19 +238,15 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT; - int locked; /* block amdgpu_gpu_recover till msg FLR COMPLETE received, * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. - * - * we can unlock the lock_reset to allow "amdgpu_job_timedout" - * to run gpu_recover() after FLR_NOTIFICATION_CMPL received - * which means host side had finished this VF's FLR. */ - locked = mutex_trylock(&adev->lock_reset); - if (locked) - adev->in_gpu_reset = true; + if (!down_read_trylock(&adev->reset_sem)) + return; + + atomic_set(&adev->in_gpu_reset, 1); do { if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) @@ -261,14 +257,12 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } while (timeout > 1); flr_done: - if (locked) { - adev->in_gpu_reset = false; - mutex_unlock(&adev->lock_reset); - } + atomic_set(&adev->in_gpu_reset, 0); + up_read(&adev->reset_sem); /* Trigger recovery for world switch failure if no TDR */ if (amdgpu_device_should_recover_gpu(adev) - && adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT) + && (amdgpu_device_has_job_running(adev) || adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) amdgpu_device_gpu_recover(adev, NULL); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index ce2bf1fb79ed12a2c7a11cb30eb5c8c9f49eebe0..9c23abf9b140d8f177ee1c2e27371ab77ebbc09c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -259,19 +259,15 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT; - int locked; /* block amdgpu_gpu_recover till msg FLR COMPLETE received, * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. - * - * we can unlock the lock_reset to allow "amdgpu_job_timedout" - * to run gpu_recover() after FLR_NOTIFICATION_CMPL received - * which means host side had finished this VF's FLR. 
*/ - locked = mutex_trylock(&adev->lock_reset); - if (locked) - adev->in_gpu_reset = true; + if (!down_read_trylock(&adev->reset_sem)) + return; + + atomic_set(&adev->in_gpu_reset, 1); do { if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) @@ -282,14 +278,13 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) } while (timeout > 1); flr_done: - if (locked) { - adev->in_gpu_reset = false; - mutex_unlock(&adev->lock_reset); - } + atomic_set(&adev->in_gpu_reset, 0); + up_read(&adev->reset_sem); /* Trigger recovery for world switch failure if no TDR */ if (amdgpu_device_should_recover_gpu(adev) - && (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT || + && (amdgpu_device_has_job_running(adev) || + adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT || adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT || adev->compute_timeout == MAX_SCHEDULE_TIMEOUT || adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 350f1bf063c61c2b55b28c31416ec3eda2060e51..74b1e7dc49a9c47c81b579894bd7cdada18ab8b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -306,7 +306,8 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) } else { WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); } - navi10_ih_reroute_ih(adev); + if (adev->irq.ih1.ring_size) + navi10_ih_reroute_ih(adev); if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) { if (ih->use_bus_addr) { @@ -668,19 +669,26 @@ static int navi10_ih_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true); - if (r) - return r; + adev->irq.ih1.ring_size = 0; + adev->irq.ih2.ring_size = 0; - adev->irq.ih1.use_doorbell = true; - adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; + if (adev->asic_type < CHIP_NAVI10) { + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true); + if (r) + return r; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); - if (r) - return r; + adev->irq.ih1.use_doorbell = true; + adev->irq.ih1.doorbell_index = + (adev->doorbell_index.ih + 1) << 1; + + r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); + if (r) + return r; - adev->irq.ih2.use_doorbell = true; - adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1; + adev->irq.ih2.use_doorbell = true; + adev->irq.ih2.doorbell_index = + (adev->doorbell_index.ih + 2) << 1; + } r = amdgpu_irq_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index e629156173d31b46b6e2bada2f5ed2a19710a06a..eadc9526d33fe6418c2c0606a6e90e9a194c9f5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -302,6 +302,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device uint32_t bif_doorbell_intr_cntl; struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if); struct ras_err_data err_data = {0, 0, 0, NULL}; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); if (REG_GET_FIELD(bif_doorbell_intr_cntl, @@ -312,28 +313,31 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device RAS_CNTLR_INTERRUPT_CLEAR, 1); WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); - /* - * clear error status after ras_controller_intr 
according to - * hw team and count ue number for query - */ - nbio_v7_4_query_ras_error_count(adev, &err_data); - - /* logging on error counter and printing for awareness */ - obj->err_data.ue_count += err_data.ue_count; - obj->err_data.ce_count += err_data.ce_count; - - if (err_data.ce_count) - dev_info(adev->dev, "%ld correctable hardware " - "errors detected in %s block, " - "no user action is needed.\n", - obj->err_data.ce_count, - adev->nbio.ras_if->name); - - if (err_data.ue_count) - dev_info(adev->dev, "%ld uncorrectable hardware " - "errors detected in %s block\n", - obj->err_data.ue_count, - adev->nbio.ras_if->name); + if (!ras->disable_ras_err_cnt_harvest) { + /* + * clear error status after ras_controller_intr + * according to hw team and count ue number + * for query + */ + nbio_v7_4_query_ras_error_count(adev, &err_data); + + /* logging on error cnt and printing for awareness */ + obj->err_data.ue_count += err_data.ue_count; + obj->err_data.ce_count += err_data.ce_count; + + if (err_data.ce_count) + dev_info(adev->dev, "%ld correctable hardware " + "errors detected in %s block, " + "no user action is needed.\n", + obj->err_data.ce_count, + adev->nbio.ras_if->name); + + if (err_data.ue_count) + dev_info(adev->dev, "%ld uncorrectable hardware " + "errors detected in %s block\n", + obj->err_data.ue_count, + adev->nbio.ras_if->name); + } dev_info(adev->dev, "RAS controller interrupt triggered " "by NBIF error\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index da8024c2826eced6c8097bfb79d2c7ec3a4c2039..4d14023562627050998631211770547fa6e7866b 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -364,6 +364,7 @@ nv_asic_reset_method(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: return AMD_RESET_METHOD_MODE1; default: if (smu_baco_is_support(smu)) @@ -379,7 +380,7 @@ static int nv_asic_reset(struct amdgpu_device *adev) struct smu_context *smu = &adev->smu; if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { - dev_info(adev->dev, "GPU BACO reset\n"); + dev_info(adev->dev, "BACO reset\n"); ret = smu_baco_enter(smu); if (ret) @@ -387,8 +388,10 @@ static int nv_asic_reset(struct amdgpu_device *adev) ret = smu_baco_exit(smu); if (ret) return ret; - } else + } else { + dev_info(adev->dev, "MODE1 reset\n"); ret = nv_asic_mode1_reset(adev); + } return ret; } @@ -689,6 +692,10 @@ static void nv_init_doorbell_index(struct amdgpu_device *adev) adev->doorbell_index.sdma_doorbell_range = 20; } +static void nv_pre_asic_init(struct amdgpu_device *adev) +{ +} + static const struct amdgpu_asic_funcs nv_asic_funcs = { .read_disabled_bios = &nv_read_disabled_bios, @@ -708,6 +715,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = .need_reset_on_init = &nv_need_reset_on_init, .get_pcie_replay_count = &nv_get_pcie_replay_count, .supports_baco = &nv_asic_supports_baco, + .pre_asic_init = &nv_pre_asic_init, }; static int nv_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index d488d250805d3e7c9e6e5a8a268619b00e432e19..e16874f30d5dc11bc2021d24c81e31dcb18e043f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -179,12 +179,11 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) } break; case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: err = psp_init_ta_microcode(&adev->psp, chip_name); if (err) return err; break; - case 
CHIP_NAVY_FLOUNDER: - break; default: BUG(); } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index 6c9614f77d33e5ffae5004d6e20bb8cc24a36464..75489313dbadf77ad7c48616a40526180a43cf0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -38,6 +38,8 @@ #include "oss/osssys_4_0_sh_mask.h" MODULE_FIRMWARE("amdgpu/renoir_asd.bin"); +MODULE_FIRMWARE("amdgpu/renoir_ta.bin"); + /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 @@ -45,7 +47,10 @@ static int psp_v12_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; const char *chip_name; + char fw_name[30]; int err = 0; + const struct ta_firmware_header_v1_0 *ta_hdr; + DRM_DEBUG("\n"); switch (adev->asic_type) { case CHIP_RENOIR: @@ -56,6 +61,55 @@ static int psp_v12_0_init_microcode(struct psp_context *psp) } err = psp_init_asd_microcode(psp, chip_name); + if (err) + goto out; + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); + err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); + if (err) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + dev_info(adev->dev, + "psp v12.0: Failed to load firmware \"%s\"\n", + fw_name); + } else { + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out2; + + ta_hdr = (const struct ta_firmware_header_v1_0 *) + adev->psp.ta_fw->data; + adev->psp.ta_hdcp_ucode_version = + le32_to_cpu(ta_hdr->ta_hdcp_ucode_version); + adev->psp.ta_hdcp_ucode_size = + le32_to_cpu(ta_hdr->ta_hdcp_size_bytes); + adev->psp.ta_hdcp_start_addr = + (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + + adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); + + adev->psp.ta_dtm_ucode_version = + le32_to_cpu(ta_hdr->ta_dtm_ucode_version); + adev->psp.ta_dtm_ucode_size = + le32_to_cpu(ta_hdr->ta_dtm_size_bytes); + adev->psp.ta_dtm_start_addr = + (uint8_t *)adev->psp.ta_hdcp_start_addr + + le32_to_cpu(ta_hdr->ta_dtm_offset_bytes); + } + + return 0; + +out2: + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; +out: + if (err) { + dev_err(adev->dev, + "psp v12.0: Failed to load firmware \"%s\"\n", + fw_name); + } + return err; } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 1b449291f0687e1476fcd34f50d87b450ff0a3d8..455d5e366c69030827d614fbd06f10f645b4ce4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -52,6 +52,8 @@ #include "bif/bif_3_0_d.h" #include "bif/bif_3_0_sh_mask.h" +#include "amdgpu_dm.h" + static const u32 tahiti_golden_registers[] = { mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011, @@ -1215,10 +1217,100 @@ static bool si_read_bios_from_rom(struct amdgpu_device *adev, return true; } -//xxx: not implemented +static void si_set_clk_bypass_mode(struct amdgpu_device *adev) +{ + u32 tmp, i; + + tmp = RREG32(CG_SPLL_FUNC_CNTL); + tmp |= SPLL_BYPASS_EN; + WREG32(CG_SPLL_FUNC_CNTL, tmp); + + tmp = RREG32(CG_SPLL_FUNC_CNTL_2); + tmp |= SPLL_CTLREQ_CHG; + WREG32(CG_SPLL_FUNC_CNTL_2, tmp); + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS) + break; + udelay(1); + } + + tmp = RREG32(CG_SPLL_FUNC_CNTL_2); + tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE); + WREG32(CG_SPLL_FUNC_CNTL_2, tmp); + + tmp = RREG32(MPLL_CNTL_MODE); + tmp &= ~MPLL_MCLK_SEL; + WREG32(MPLL_CNTL_MODE, tmp); +} + +static void si_spll_powerdown(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32(SPLL_CNTL_MODE); + tmp 
|= SPLL_SW_DIR_CONTROL; + WREG32(SPLL_CNTL_MODE, tmp); + + tmp = RREG32(CG_SPLL_FUNC_CNTL); + tmp |= SPLL_RESET; + WREG32(CG_SPLL_FUNC_CNTL, tmp); + + tmp = RREG32(CG_SPLL_FUNC_CNTL); + tmp |= SPLL_SLEEP; + WREG32(CG_SPLL_FUNC_CNTL, tmp); + + tmp = RREG32(SPLL_CNTL_MODE); + tmp &= ~SPLL_SW_DIR_CONTROL; + WREG32(SPLL_CNTL_MODE, tmp); +} + +static int si_gpu_pci_config_reset(struct amdgpu_device *adev) +{ + u32 i; + int r = -EINVAL; + + dev_info(adev->dev, "GPU pci config reset\n"); + + /* set mclk/sclk to bypass */ + si_set_clk_bypass_mode(adev); + /* powerdown spll */ + si_spll_powerdown(adev); + /* disable BM */ + pci_clear_master(adev->pdev); + /* reset */ + amdgpu_device_pci_config_reset(adev); + + udelay(100); + + /* wait for asic to come out of reset */ + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { + /* enable BM */ + pci_set_master(adev->pdev); + adev->has_hw_reset = true; + r = 0; + break; + } + udelay(1); + } + + return r; +} + static int si_asic_reset(struct amdgpu_device *adev) { - return 0; + int r; + + dev_info(adev->dev, "PCI CONFIG reset\n"); + + amdgpu_atombios_scratch_regs_engine_hung(adev, true); + + r = si_gpu_pci_config_reset(adev); + + amdgpu_atombios_scratch_regs_engine_hung(adev, false); + + return r; } static bool si_asic_supports_baco(struct amdgpu_device *adev) @@ -1779,6 +1871,10 @@ static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) return 0; } +static void si_pre_asic_init(struct amdgpu_device *adev) +{ +} + static const struct amdgpu_asic_funcs si_asic_funcs = { .read_disabled_bios = &si_read_disabled_bios, @@ -1800,6 +1896,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs = .need_reset_on_init = &si_need_reset_on_init, .get_pcie_replay_count = &si_get_pcie_replay_count, .supports_baco = &si_asic_supports_baco, + .pre_asic_init = &si_pre_asic_init, }; static uint32_t si_get_rev_id(struct amdgpu_device *adev) @@ -2546,6 +2643,10 @@ int si_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &si_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); +#if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI) + else if (amdgpu_device_has_dc_support(adev)) + amdgpu_device_ip_block_add(adev, &dm_ip_block); +#endif else amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); @@ -2560,6 +2661,10 @@ int si_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &si_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); +#if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI) + else if (amdgpu_device_has_dc_support(adev)) + amdgpu_device_ip_block_add(adev, &dm_ip_block); +#endif else amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c index d55bf64770c48581491ce3153ecc21618f65d6cc..7fb240c4990ca736d20b908fc0be5cd99c7c973b 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c @@ -508,14 +508,9 @@ static bool smu_v11_0_i2c_bus_lock(struct i2c_adapter *control) struct amdgpu_device *adev = to_amdgpu_device(control); /* Send PPSMC_MSG_RequestI2CBus */ - if (!adev->powerplay.pp_funcs->smu_i2c_bus_access) - goto Fail; - - - if 
(!adev->powerplay.pp_funcs->smu_i2c_bus_access(adev->powerplay.pp_handle, true)) + if (!amdgpu_dpm_smu_i2c_bus_access(adev, true)) return true; -Fail: return false; } @@ -523,16 +518,10 @@ static bool smu_v11_0_i2c_bus_unlock(struct i2c_adapter *control) { struct amdgpu_device *adev = to_amdgpu_device(control); - /* Send PPSMC_MSG_RequestI2CBus */ - if (!adev->powerplay.pp_funcs->smu_i2c_bus_access) - goto Fail; - /* Send PPSMC_MSG_ReleaseI2CBus */ - if (!adev->powerplay.pp_funcs->smu_i2c_bus_access(adev->powerplay.pp_handle, - false)) + if (!amdgpu_dpm_smu_i2c_bus_access(adev, false)) return true; -Fail: return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 84d811b6e48be52204b292a202c7dea92256bd76..2f93c475d6d86c4e57b6964c1cea416d67bc1f74 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -580,10 +580,13 @@ static int soc15_asic_reset(struct amdgpu_device *adev) switch (soc15_asic_reset_method(adev)) { case AMD_RESET_METHOD_BACO: + dev_info(adev->dev, "BACO reset\n"); return soc15_asic_baco_reset(adev); case AMD_RESET_METHOD_MODE2: + dev_info(adev->dev, "MODE2 reset\n"); return amdgpu_dpm_mode2_reset(adev); default: + dev_info(adev->dev, "MODE1 reset\n"); return soc15_asic_mode1_reset(adev); } } @@ -1026,6 +1029,11 @@ static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev) return (nak_r + nak_g); } +static void soc15_pre_asic_init(struct amdgpu_device *adev) +{ + gmc_v9_0_restore_registers(adev); +} + static const struct amdgpu_asic_funcs soc15_asic_funcs = { .read_disabled_bios = &soc15_read_disabled_bios, @@ -1046,6 +1054,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = .need_reset_on_init = &soc15_need_reset_on_init, .get_pcie_replay_count = &soc15_get_pcie_replay_count, .supports_baco = &soc15_supports_baco, + .pre_asic_init = &soc15_pre_asic_init, }; static const struct amdgpu_asic_funcs vega20_asic_funcs = @@ -1069,6 +1078,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .need_reset_on_init = &soc15_need_reset_on_init, .get_pcie_replay_count = &soc15_get_pcie_replay_count, .supports_baco = &soc15_supports_baco, + .pre_asic_init = &soc15_pre_asic_init, }; static int soc15_common_early_init(void *handle) @@ -1449,7 +1459,8 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable uint32_t def, data; if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS) { + adev->asic_type == CHIP_ARCTURUS || + adev->asic_type == CHIP_RENOIR) { def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL)); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) diff --git a/drivers/gpu/drm/amd/amdgpu/ta_rap_if.h b/drivers/gpu/drm/amd/amdgpu/ta_rap_if.h new file mode 100644 index 0000000000000000000000000000000000000000..f14833fae07c8d4af82b54fa5425869e1fed9b80 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/ta_rap_if.h @@ -0,0 +1,84 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _TA_RAP_IF_H +#define _TA_RAP_IF_H + +/* Responses have bit 31 set */ +#define RSP_ID_MASK (1U << 31) +#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK) + +enum ta_rap_status { + TA_RAP_STATUS__SUCCESS = 1, + TA_RAP_STATUS__ERROR_GENERIC_FAILURE = 2, + TA_RAP_STATUS__ERROR_CMD_NOT_SUPPORTED = 3, + TA_RAP_STATUS__ERROR_INVALID_VALIDATION_METHOD = 4, + TA_RAP_STATUS__ERROR_NULL_POINTER = 5, + TA_RAP_STATUS__ERROR_NOT_INITIALIZED = 6, + TA_RAP_STATUS__ERROR_VALIDATION_FAILED = 7, + TA_RAP_STATUS__ERROR_ASIC_NOT_SUPPORTED = 8, + TA_RAP_STATUS__ERROR_OPERATION_NOT_PERMISSABLE = 9, + TA_RAP_STATUS__ERROR_ALREADY_INIT = 10, +}; + +enum ta_rap_cmd { + TA_CMD_RAP__INITIALIZE = 1, + TA_CMD_RAP__VALIDATE_L0 = 2, +}; + +enum ta_rap_validation_method { + METHOD_A = 1, +}; + +struct ta_rap_cmd_input_data { + uint8_t reserved[8]; +}; + +struct ta_rap_cmd_output_data { + uint32_t last_subsection; + uint32_t num_total_validate; + uint32_t num_valid; + uint32_t last_validate_addr; + uint32_t last_validate_val; + uint32_t last_validate_val_exptd; +}; + +union ta_rap_cmd_input { + struct ta_rap_cmd_input_data input; +}; + +union ta_rap_cmd_output { + struct ta_rap_cmd_output_data output; +}; + +struct ta_rap_shared_memory { + uint32_t cmd_id; + uint32_t validation_method_id; + uint32_t resp_id; + enum ta_rap_status rap_status; + union ta_rap_cmd_input rap_in_message; + union ta_rap_cmd_output rap_out_message; + uint8_t reserved[64]; +}; + +#endif // #define _TA_RAP_IF_H diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 418cf097c918a58a066d986bdca6d5b82519d3a1..5288617ca55239266449da00f35a67a582a57253 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -32,20 +32,6 @@ #define UMC_6_INST_DIST 0x40000 -/* - * (addr / 256) * 8192, the higher 26 bits in ErrorAddr - * is the index of 8KB block - */ -#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5) -/* channel index is the index of 256B block */ -#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8) -/* offset in 256B block */ -#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL) - -#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++) -#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++) -#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) 
LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst)) - const uint32_t umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = { {2, 18, 11, 27}, {4, 20, 13, 29}, diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c new file mode 100644 index 0000000000000000000000000000000000000000..5665c77a9d5874a1c0bda1d1ee2123813d7708bf --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c @@ -0,0 +1,331 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "umc_v8_7.h" +#include "amdgpu_ras.h" +#include "amdgpu.h" + +#include "rsmu/rsmu_0_0_2_offset.h" +#include "rsmu/rsmu_0_0_2_sh_mask.h" +#include "umc/umc_8_7_0_offset.h" +#include "umc/umc_8_7_0_sh_mask.h" + +#define UMC_8_INST_DIST 0x40000 + +const uint32_t + umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM] = { + {2, 11}, {4, 13}, + {1, 8}, {7, 14}, + {10, 3}, {12, 5}, + {9, 0}, {15, 6} +}; + +static inline uint32_t get_umc_8_reg_offset(struct amdgpu_device *adev, + uint32_t umc_inst, + uint32_t ch_inst) +{ + return adev->umc.channel_offs*ch_inst + UMC_8_INST_DIST*umc_inst; +} + +static void umc_v8_7_clear_error_count_per_channel(struct amdgpu_device *adev, + uint32_t umc_reg_offset) +{ + uint32_t ecc_err_cnt_addr; + uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCnt); + + /* select the lower chip */ + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + + umc_reg_offset) * 4); + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, + UMCCH0_0_GeccErrCntSel, + GeccErrCntCsSel, 0); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, + ecc_err_cnt_sel); + + /* clear lower chip error count */ + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, + UMC_V8_7_CE_CNT_INIT); + + /* select the higher chip */ + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + + umc_reg_offset) * 4); + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, + UMCCH0_0_GeccErrCntSel, + GeccErrCntCsSel, 1); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, + ecc_err_cnt_sel); + + /* clear higher chip error count */ + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, + UMC_V8_7_CE_CNT_INIT); +} + +static void umc_v8_7_clear_error_count(struct amdgpu_device *adev) +{ + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + 
uint32_t umc_reg_offset = 0; + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_8_reg_offset(adev, + umc_inst, + ch_inst); + + umc_v8_7_clear_error_count_per_channel(adev, + umc_reg_offset); + } +} + +static void umc_v8_7_query_correctable_error_count(struct amdgpu_device *adev, + uint32_t umc_reg_offset, + unsigned long *error_count) +{ + uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; + uint32_t ecc_err_cnt, ecc_err_cnt_addr; + uint64_t mc_umc_status; + uint32_t mc_umc_status_addr; + + /* UMC 8_7_2 registers */ + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCnt); + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + + /* select the lower chip and check the error count */ + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4); + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel, + GeccErrCntCsSel, 0); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); + + ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4); + *error_count += + (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_GeccErrCnt, GeccErrCnt) - + UMC_V8_7_CE_CNT_INIT); + + /* select the higher chip and check the err counter */ + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel, + GeccErrCntCsSel, 1); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); + + ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4); + *error_count += + (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_GeccErrCnt, GeccErrCnt) - + UMC_V8_7_CE_CNT_INIT); + + /* check for SRAM correctable error + MCUMC_STATUS is a 64 bit register */ + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) + *error_count += 1; +} + +static void umc_v8_7_querry_uncorrectable_error_count(struct amdgpu_device *adev, + uint32_t umc_reg_offset, + unsigned long *error_count) +{ + uint64_t mc_umc_status; + uint32_t mc_umc_status_addr; + + mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + + /* check the MCUMC_STATUS */ + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); + if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) + *error_count += 1; +} + +static void umc_v8_7_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data* err_data = (struct ras_err_data*)ras_error_status; + + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_8_reg_offset(adev, + umc_inst, + ch_inst); + + umc_v8_7_query_correctable_error_count(adev, + umc_reg_offset, + &(err_data->ce_count)); + umc_v8_7_querry_uncorrectable_error_count(adev, + umc_reg_offset, + &(err_data->ue_count)); + } + + umc_v8_7_clear_error_count(adev); 
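+	/* Annotation (descriptive, not upstream): umc_v8_7_clear_error_count() rewinds both chip-select counters to UMC_V8_7_CE_CNT_INIT after each pass, so the GeccErrCnt - UMC_V8_7_CE_CNT_INIT deltas computed above report errors accumulated since the previous query, not lifetime totals. */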
+} + +static void umc_v8_7_query_error_address(struct amdgpu_device *adev, + struct ras_err_data *err_data, + uint32_t umc_reg_offset, + uint32_t ch_inst, + uint32_t umc_inst) +{ + uint32_t lsb, mc_umc_status_addr; + uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0; + struct eeprom_table_record *err_rec; + uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + mc_umc_addrt0 = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0); + + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); + + if (mc_umc_status == 0) + return; + + if (!err_data->err_addr) { + /* clear umc status */ + WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); + return; + } + + err_rec = &err_data->err_addr[err_data->err_addr_cnt]; + + /* calculate error address if ue/ce error is detected */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) { + + err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4); + /* the lowest lsb bits should be ignored */ + lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB); + err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); + err_addr &= ~((0x1ULL << lsb) - 1); + + /* translate umc channel address to soc pa, 3 parts are included */ + retired_page = ADDR_OF_8KB_BLOCK(err_addr) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(err_addr); + + /* we only save ue error information currently, ce is skipped */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) + == 1) { + err_rec->address = err_addr; + /* page frame address is saved */ + err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + err_rec->ts = (uint64_t)ktime_get_real_seconds(); + err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec->cu = 0; + err_rec->mem_channel = channel_index; + err_rec->mcumc_id = umc_inst; + + err_data->err_addr_cnt++; + } + } + + /* clear umc status */ + WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); +} + +static void umc_v8_7_query_ras_error_address(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data* err_data = (struct ras_err_data*)ras_error_status; + + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_8_reg_offset(adev, + umc_inst, + ch_inst); + + umc_v8_7_query_error_address(adev, + err_data, + umc_reg_offset, + ch_inst, + umc_inst); + } +} + +static void umc_v8_7_err_cnt_init_per_channel(struct amdgpu_device *adev, + uint32_t umc_reg_offset) +{ + uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; + uint32_t ecc_err_cnt_addr; + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCnt); + + /* select the lower chip and check the error count */ + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4); + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel, + GeccErrCntCsSel, 0); + /* set ce error interrupt type to APIC based interrupt */ + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel, + GeccErrInt, 0x1); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, 
ecc_err_cnt_sel); + /* set error count to initial value */ + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_7_CE_CNT_INIT); + + /* select the higher chip and check the err counter */ + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel, + GeccErrCntCsSel, 1); + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_7_CE_CNT_INIT); +} + +static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev) +{ + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_8_reg_offset(adev, + umc_inst, + ch_inst); + + umc_v8_7_err_cnt_init_per_channel(adev, umc_reg_offset); + } +} + +const struct amdgpu_umc_funcs umc_v8_7_funcs = { + .err_cnt_init = umc_v8_7_err_cnt_init, + .ras_late_init = amdgpu_umc_ras_late_init, + .query_ras_error_count = umc_v8_7_query_ras_error_count, + .query_ras_error_address = umc_v8_7_query_ras_error_address, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h new file mode 100644 index 0000000000000000000000000000000000000000..d4d0468e3df50f20416c75470d39df57f248f421 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h @@ -0,0 +1,51 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __UMC_V8_7_H__ +#define __UMC_V8_7_H__ + +#include "soc15_common.h" +#include "amdgpu.h" + +/* HBM Memory Channel Width */ +#define UMC_V8_7_HBM_MEMORY_CHANNEL_WIDTH 128 +/* number of umc channel instances with memory map register access */ +#define UMC_V8_7_CHANNEL_INSTANCE_NUM 2 +/* number of umc instances with memory map register access */ +#define UMC_V8_7_UMC_INSTANCE_NUM 8 +/* total channel instances in one umc block */ +#define UMC_V8_7_TOTAL_CHANNEL_NUM (UMC_V8_7_CHANNEL_INSTANCE_NUM * UMC_V8_7_UMC_INSTANCE_NUM) +/* UMC register per channel offset */ +#define UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA 0x400 + +/* EccErrCnt max value */ +#define UMC_V8_7_CE_CNT_MAX 0xffff +/* umc ce interrupt threshold */ +#define UMC_V8_7_CE_INT_THRESHOLD 0xffff +/* umc ce count initial value */ +#define UMC_V8_7_CE_CNT_INIT (UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD) + +extern const struct amdgpu_umc_funcs umc_v8_7_funcs; +extern const uint32_t + umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM]; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index e99bef6e235420d14bd824c0d4125df0737a522d..139fac0d8e76e84a7bfd8e63f6a4d8c074b0f270 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -80,23 +80,18 @@ static int vcn_v2_5_early_init(void *handle) adev->vcn.harvest_config = 0; adev->vcn.num_enc_rings = 1; } else { - if (adev->asic_type == CHIP_ARCTURUS) { - u32 harvest; - int i; - - adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING); - if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) - adev->vcn.harvest_config |= 1 << i; - } - - if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | - AMDGPU_VCN_HARVEST_VCN1)) - /* both instances are harvested, disable the block */ - return -ENOENT; - } else - adev->vcn.num_vcn_inst = 1; + u32 harvest; + int i; + adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS; + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING); + if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) + adev->vcn.harvest_config |= 1 << i; + } + if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | + AMDGPU_VCN_HARVEST_VCN1)) + /* both instances are harvested, disable the block */ + return -ENOENT; adev->vcn.num_enc_rings = 2; } @@ -1108,7 +1103,7 @@ static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev, { uint32_t data = 0, loop = 0, size = 0; uint64_t addr = table->gpu_addr; - struct mmsch_v1_1_init_header *header = NULL;; + struct mmsch_v1_1_init_header *header = NULL; header = (struct mmsch_v1_1_init_header *)table->cpu_addr; size = header->total_size; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 63e5547cfb16d8a0264c923030fc430bde120afa..589d6cd8adec9d53d41caa3fa741ab90f6698c23 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -198,7 +198,7 @@ static int vcn_v3_0_sw_init(void *handle) } else { ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i; } - if (i != 0) + if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 0) ring->no_scheduler = true; sprintf(ring->name, "vcn_dec_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, @@ -222,7 +222,7 @@ static int vcn_v3_0_sw_init(void *handle) } else { ring->doorbell_index =
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i; } - if (i != 1) + if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 1) ring->no_scheduler = true; sprintf(ring->name, "vcn_enc_%d.%d", i, j); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index f6f2ed0830b1ad6e0822c1be981372a3a3244d4a..9bcd0eebc6d7e6899cdbf744a707fa18535a3922 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -752,8 +752,10 @@ static int vi_asic_reset(struct amdgpu_device *adev) int r; if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { + dev_info(adev->dev, "BACO reset\n"); r = amdgpu_dpm_baco_reset(adev); } else { + dev_info(adev->dev, "PCI CONFIG reset\n"); r = vi_asic_pci_config_reset(adev); } @@ -1066,6 +1068,10 @@ static bool vi_need_reset_on_init(struct amdgpu_device *adev) return false; } +static void vi_pre_asic_init(struct amdgpu_device *adev) +{ +} + static const struct amdgpu_asic_funcs vi_asic_funcs = { .read_disabled_bios = &vi_read_disabled_bios, @@ -1086,6 +1092,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .need_reset_on_init = &vi_need_reset_on_init, .get_pcie_replay_count = &vi_get_pcie_replay_count, .supports_baco = &vi_asic_supports_baco, + .pre_asic_init = &vi_pre_asic_init, }; #define CZ_REV_BRISTOL(rev) \ @@ -1507,8 +1514,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_MC, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) { @@ -1526,8 +1532,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_SDMA, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) { @@ -1545,8 +1550,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_HDP, pp_support_state, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } @@ -1560,8 +1564,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_BIF, PP_STATE_SUPPORT_LS, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) { if (state == AMD_CG_STATE_UNGATE) @@ -1573,8 +1576,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_BIF, PP_STATE_SUPPORT_CG, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) { @@ -1588,8 +1590,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle, PP_BLOCK_SYS_DRM, PP_STATE_SUPPORT_LS, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) { @@ -1603,8 +1604,7 @@ static int vi_common_set_clockgating_state_by_smu(void 
*handle, PP_BLOCK_SYS_ROM, PP_STATE_SUPPORT_CG, pp_state); - if (adev->powerplay.pp_funcs->set_clockgating_by_smu) - amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); + amdgpu_dpm_set_clockgating_by_smu(adev, msg_id); } return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 577d901fdb6366252628648a747ba74717989375..affbca7c0050ea4992e9763d70b10412b058f067 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -911,7 +911,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = { 0x705d0000, 0x807c817c, 0x8070ff70, 0x00000080, 0xbf0a7b7c, 0xbf85fff8, - 0xbf82014f, 0xbef4037e, + 0xbf820151, 0xbef4037e, 0x8775ff7f, 0x0000ffff, 0x8875ff75, 0x00040000, 0xbef60380, 0xbef703ff, @@ -1024,61 +1024,62 @@ static const uint32_t cwsr_trap_nv1x_hex[] = { 0xbe883108, 0xbe8a310a, 0xbe8c310c, 0xbe8e310e, 0xbf06807c, 0xbf84fff0, - 0xb9782a05, 0x80788178, - 0xbf0d9972, 0xbf850002, - 0x8f788978, 0xbf820001, - 0x8f788a78, 0xb96e1e06, - 0x8f6e8a6e, 0x80786e78, - 0x8078ff78, 0x00000200, - 0xbef603ff, 0x01000000, - 0xf4211bfa, 0xf0000000, - 0x80788478, 0xf4211b3a, + 0xba80f801, 0x00000000, + 0xbf8a0000, 0xb9782a05, + 0x80788178, 0xbf0d9972, + 0xbf850002, 0x8f788978, + 0xbf820001, 0x8f788a78, + 0xb96e1e06, 0x8f6e8a6e, + 0x80786e78, 0x8078ff78, + 0x00000200, 0xbef603ff, + 0x01000000, 0xf4211bfa, 0xf0000000, 0x80788478, - 0xf4211b7a, 0xf0000000, - 0x80788478, 0xf4211c3a, + 0xf4211b3a, 0xf0000000, + 0x80788478, 0xf4211b7a, 0xf0000000, 0x80788478, - 0xf4211c7a, 0xf0000000, - 0x80788478, 0xf4211eba, + 0xf4211c3a, 0xf0000000, + 0x80788478, 0xf4211c7a, 0xf0000000, 0x80788478, - 0xf4211efa, 0xf0000000, - 0x80788478, 0xf4211e7a, + 0xf4211eba, 0xf0000000, + 0x80788478, 0xf4211efa, 0xf0000000, 0x80788478, - 0xf4211cfa, 0xf0000000, - 0x80788478, 0xf4211bba, + 0xf4211e7a, 0xf0000000, + 0x80788478, 0xf4211cfa, 0xf0000000, 0x80788478, - 0xbf8cc07f, 0xb9eef814, 0xf4211bba, 0xf0000000, 0x80788478, 0xbf8cc07f, - 0xb9eef815, 0xbefc036f, - 0xbefe0370, 0xbeff0371, - 0x876f7bff, 0x000003ff, - 0xb9ef4803, 0xb9f9f816, - 0x876f7bff, 0xfffff800, - 0x906f8b6f, 0xb9efa2c3, - 0xb9f3f801, 0xb96e2a05, - 0x806e816e, 0xbf0d9972, - 0xbf850002, 0x8f6e896e, - 0xbf820001, 0x8f6e8a6e, - 0x806eff6e, 0x00000200, - 0x806e746e, 0x826f8075, - 0x876fff6f, 0x0000ffff, - 0xf4091c37, 0xfa000050, - 0xf4091d37, 0xfa000060, - 0xf4011e77, 0xfa000074, - 0xbf8cc07f, 0x876fff6d, - 0xfc000000, 0x906f9a6f, - 0x8f6f906f, 0xbeee0380, + 0xb9eef814, 0xf4211bba, + 0xf0000000, 0x80788478, + 0xbf8cc07f, 0xb9eef815, + 0xbefc036f, 0xbefe0370, + 0xbeff0371, 0x876f7bff, + 0x000003ff, 0xb9ef4803, + 0xb9f9f816, 0x876f7bff, + 0xfffff800, 0x906f8b6f, + 0xb9efa2c3, 0xb9f3f801, + 0xb96e2a05, 0x806e816e, + 0xbf0d9972, 0xbf850002, + 0x8f6e896e, 0xbf820001, + 0x8f6e8a6e, 0x806eff6e, + 0x00000200, 0x806e746e, + 0x826f8075, 0x876fff6f, + 0x0000ffff, 0xf4091c37, + 0xfa000050, 0xf4091d37, + 0xfa000060, 0xf4011e77, + 0xfa000074, 0xbf8cc07f, + 0x876fff6d, 0xfc000000, + 0x906f9a6f, 0x8f6f906f, + 0xbeee0380, 0x886e6f6e, + 0x876fff6d, 0x02000000, + 0x906f996f, 0x8f6f8f6f, 0x886e6f6e, 0x876fff6d, - 0x02000000, 0x906f996f, - 0x8f6f8f6f, 0x886e6f6e, - 0x876fff6d, 0x01000000, - 0x906f986f, 0x8f6f996f, - 0x886e6f6e, 0x876fff7a, - 0x00800000, 0x906f976f, - 0xb9eef807, 0x876dff6d, - 0x0000ffff, 0x87fe7e7e, - 0x87ea6a6a, 0xb9faf802, - 0xbf8a0000, 0xbe80226c, + 0x01000000, 0x906f986f, + 0x8f6f996f, 0x886e6f6e, + 0x876fff7a, 0x00800000, + 0x906f976f, 0xb9eef807, + 0x876dff6d, 
0x0000ffff, + 0x87fe7e7e, 0x87ea6a6a, + 0xb9faf802, 0xbe80226c, 0xbf810000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, @@ -1807,7 +1808,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x705d0000, 0x807c817c, 0x8070ff70, 0x00000080, 0xbf0a7b7c, 0xbf85fff8, - 0xbf82013a, 0xbef4037e, + 0xbf82013c, 0xbef4037e, 0x8775ff7f, 0x0000ffff, 0x8875ff75, 0x00040000, 0xbef60380, 0xbef703ff, @@ -1920,50 +1921,51 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xbe883108, 0xbe8a310a, 0xbe8c310c, 0xbe8e310e, 0xbf06807c, 0xbf84fff0, - 0xb9782a05, 0x80788178, - 0xbf0d9972, 0xbf850002, - 0x8f788978, 0xbf820001, - 0x8f788a78, 0xb96e1e06, - 0x8f6e8a6e, 0x80786e78, - 0x8078ff78, 0x00000200, - 0xbef603ff, 0x01000000, - 0xf4211bfa, 0xf0000000, - 0x80788478, 0xf4211b3a, + 0xba80f801, 0x00000000, + 0xbf8a0000, 0xb9782a05, + 0x80788178, 0xbf0d9972, + 0xbf850002, 0x8f788978, + 0xbf820001, 0x8f788a78, + 0xb96e1e06, 0x8f6e8a6e, + 0x80786e78, 0x8078ff78, + 0x00000200, 0xbef603ff, + 0x01000000, 0xf4211bfa, 0xf0000000, 0x80788478, - 0xf4211b7a, 0xf0000000, - 0x80788478, 0xf4211c3a, + 0xf4211b3a, 0xf0000000, + 0x80788478, 0xf4211b7a, 0xf0000000, 0x80788478, - 0xf4211c7a, 0xf0000000, - 0x80788478, 0xf4211eba, + 0xf4211c3a, 0xf0000000, + 0x80788478, 0xf4211c7a, 0xf0000000, 0x80788478, - 0xf4211efa, 0xf0000000, - 0x80788478, 0xf4211e7a, + 0xf4211eba, 0xf0000000, + 0x80788478, 0xf4211efa, 0xf0000000, 0x80788478, - 0xf4211cfa, 0xf0000000, - 0x80788478, 0xf4211bba, + 0xf4211e7a, 0xf0000000, + 0x80788478, 0xf4211cfa, 0xf0000000, 0x80788478, - 0xbf8cc07f, 0xb9eef814, 0xf4211bba, 0xf0000000, 0x80788478, 0xbf8cc07f, - 0xb9eef815, 0xbefc036f, - 0xbefe0370, 0xbeff0371, - 0x876f7bff, 0x000003ff, - 0xb9ef4803, 0x876f7bff, - 0xfffff800, 0x906f8b6f, - 0xb9efa2c3, 0xb9f3f801, - 0xb96e2a05, 0x806e816e, - 0xbf0d9972, 0xbf850002, - 0x8f6e896e, 0xbf820001, - 0x8f6e8a6e, 0x806eff6e, - 0x00000200, 0x806e746e, - 0x826f8075, 0x876fff6f, - 0x0000ffff, 0xf4091c37, - 0xfa000050, 0xf4091d37, - 0xfa000060, 0xf4011e77, - 0xfa000074, 0xbf8cc07f, - 0x876dff6d, 0x0000ffff, - 0x87fe7e7e, 0x87ea6a6a, - 0xb9faf802, 0xbf8a0000, + 0xb9eef814, 0xf4211bba, + 0xf0000000, 0x80788478, + 0xbf8cc07f, 0xb9eef815, + 0xbefc036f, 0xbefe0370, + 0xbeff0371, 0x876f7bff, + 0x000003ff, 0xb9ef4803, + 0x876f7bff, 0xfffff800, + 0x906f8b6f, 0xb9efa2c3, + 0xb9f3f801, 0xb96e2a05, + 0x806e816e, 0xbf0d9972, + 0xbf850002, 0x8f6e896e, + 0xbf820001, 0x8f6e8a6e, + 0x806eff6e, 0x00000200, + 0x806e746e, 0x826f8075, + 0x876fff6f, 0x0000ffff, + 0xf4091c37, 0xfa000050, + 0xf4091d37, 0xfa000060, + 0xf4011e77, 0xfa000074, + 0xbf8cc07f, 0x876dff6d, + 0x0000ffff, 0x87fe7e7e, + 0x87ea6a6a, 0xb9faf802, 0xbe80226c, 0xbf810000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index 5b220f2a7501f6e5a6b323634229db903892069d..5081f91190b84013a63c52a8c4176ef336323eb1 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -894,6 +894,11 @@ L_RESTORE_SGPR: s_cmp_eq_u32 m0, 0 //scc = (m0 < s_sgpr_save_num) ? 1 : 0 s_cbranch_scc0 L_RESTORE_SGPR_LOOP + // s_barrier with MODE.DEBUG_EN=1, STATUS.PRIV=1 incorrectly asserts debug exception. + // Clear DEBUG_EN before and restore MODE after the barrier. 
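+ // (MODE, and with it DEBUG_EN, is then restored along with the other HW registers in L_RESTORE_HWREG below.)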
+ s_setreg_imm32_b32 hwreg(HW_REG_MODE), 0 + s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG + /* restore HW registers */ L_RESTORE_HWREG: // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR) @@ -976,8 +981,6 @@ L_RESTORE_HWREG: s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu - s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG - s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution L_END_PGM: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index e9b96ad3d9a525a4f0f01adc85180f6651e2945d..b7b16adb0615d6feb2b5b9ec30c8a312c5d879d4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1254,7 +1254,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev) return true; } - if (dev->device_info->needs_iommu_device) + if (dev->use_iommu_v2) return false; amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 6a250f8fcfb818a45a3893d6659f669625c7a051..3fac06b281ce0b21e8dd90565cdee81b6b82c87b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -742,6 +742,22 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, return 0; } +static bool kfd_ignore_crat(void) +{ + bool ret; + + if (ignore_crat) + return true; + +#ifndef KFD_SUPPORT_IOMMU_V2 + ret = true; +#else + ret = false; +#endif + + return ret; +} + /* * kfd_create_crat_image_acpi - Allocates memory for CRAT image and * copies CRAT from ACPI (if available).
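* When kfd_ignore_crat() returns true (module option set, or IOMMU v2 support compiled out), this returns -ENODATA and the caller is expected to fall back to a virtual CRAT image.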
@@ -776,7 +792,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size) return -EINVAL; } - if (ignore_crat) { + if (kfd_ignore_crat()) { pr_info("CRAT table disabled by module option\n"); return -ENODATA; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 4bfedaab183f0ee6a316517a7db30a68d69ac913..0e71a0543f987be73d7cb4347bf060ef4908e943 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -29,6 +29,7 @@ #include "cwsr_trap_handler.h" #include "kfd_iommu.h" #include "amdgpu_amdkfd.h" +#include "kfd_smi_events.h" #define MQD_SIZE_ALIGNED 768 @@ -115,6 +116,7 @@ static const struct kfd_device_info carrizo_device_info = { .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; +#endif static const struct kfd_device_info raven_device_info = { .asic_family = CHIP_RAVEN, @@ -133,7 +135,6 @@ static const struct kfd_device_info raven_device_info = { .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; -#endif static const struct kfd_device_info hawaii_device_info = { .asic_family = CHIP_HAWAII, @@ -711,11 +712,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto kfd_doorbell_error; } - if (kfd->kfd2kgd->get_hive_id) - kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd); + kfd->hive_id = amdgpu_amdkfd_get_hive_id(kfd->kgd); - if (kfd->kfd2kgd->get_unique_id) - kfd->unique_id = kfd->kfd2kgd->get_unique_id(kfd->kgd); + kfd->unique_id = amdgpu_amdkfd_get_unique_id(kfd->kgd); if (kfd_interrupt_init(kfd)) { dev_err(kfd_device, "Error initializing interrupts\n"); @@ -737,6 +736,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto gws_error; } + /* If CRAT is broken, we won't enable IOMMU v2 */ + kfd_double_confirm_iommu_support(kfd); + if (kfd_iommu_device_init(kfd)) { dev_err(kfd_device, "Error initializing iommuv2\n"); goto device_iommu_error; @@ -810,6 +812,8 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) if (!kfd->init_complete) return 0; + kfd_smi_event_update_gpu_reset(kfd, false); + kfd->dqm->ops.pre_reset(kfd->dqm); kgd2kfd_suspend(kfd, false); @@ -838,6 +842,8 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) atomic_set(&kfd->sram_ecc_flag, 0); + kfd_smi_event_update_gpu_reset(kfd, true); + return 0; } @@ -1245,6 +1251,12 @@ void kfd_dec_compute_active(struct kfd_dev *kfd) WARN_ONCE(count < 0, "Compute profile ref. count error"); } +void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask) +{ + if (kfd) + kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask); +} + #if defined(CONFIG_DEBUG_FS) /* This function will send a package to HIQ to hang the HWS diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index e0e60b0d0669ebcfce6a33fba36691e2323c103b..560adc57a050bf51fd5982e2bcf231d901693a8b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -153,30 +153,6 @@ static void decrement_queue_count(struct device_queue_manager *dqm, dqm->active_cp_queue_count--; } -int read_sdma_queue_counter(uint64_t q_rptr, uint64_t *val) -{ - int ret; - uint64_t tmp = 0; - - if (!val) - return -EINVAL; - /* - * SDMA activity counter is stored at queue's RPTR + 0x8 location.
- */ - if (!access_ok((const void __user *)(q_rptr + - sizeof(uint64_t)), sizeof(uint64_t))) { - pr_err("Can't access sdma queue activity counter\n"); - return -EFAULT; - } - - ret = get_user(tmp, (uint64_t *)(q_rptr + sizeof(uint64_t))); - if (!ret) { - *val = tmp; - } - - return ret; -} - static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q) { struct kfd_dev *dev = qpd->dqm->dev; @@ -552,7 +528,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, /* Get the SDMA queue stats */ if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) || (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) { - retval = read_sdma_queue_counter((uint64_t)q->properties.read_ptr, + retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr, &sdma_val); if (retval) pr_err("Failed to read SDMA queue counter for queue: %d\n", @@ -1473,7 +1449,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, /* Get the SDMA queue stats */ if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) || (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) { - retval = read_sdma_queue_counter((uint64_t)q->properties.read_ptr, + retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr, &sdma_val); if (retval) pr_err("Failed to read SDMA queue counter for queue: %d\n", diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 49d8e324c636f85ede46115b5ac848e74c473da4..16262e5d93f5c33a0af6041cd1f7a471e9d361bf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -251,5 +251,11 @@ static inline void dqm_unlock(struct device_queue_manager *dqm) mutex_unlock(&dqm->lock_hidden); } -int read_sdma_queue_counter(uint64_t q_rptr, uint64_t *val); +static inline int read_sdma_queue_counter(uint64_t __user *q_rptr, uint64_t *val) +{ + /* + * SDMA activity counter is stored at queue's RPTR + 0x8 location. + */ + return get_user(*val, q_rptr + 1); +} #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c index 95a82ac455f2ba1609426ecc797231d865b77ec5..309f63a0b34a15e6442488bbc6fbddcd15544a03 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -62,7 +62,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm, SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; if (amdgpu_noretry && - !dqm->dev->device_info->needs_iommu_device) + !dqm->dev->use_iommu_v2) qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index c1166c40ac15e6ee894c8862907b23992871618a..3c22909470f2078343ccda6bd0e7722ae664e70d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -321,7 +321,7 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id) pdd->lds_base = MAKE_LDS_APP_BASE_VI(); pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); - if (!pdd->dev->device_info->needs_iommu_device) { + if (!pdd->dev->use_iommu_v2) { /* dGPUs: SVM aperture starting at 0 * with small reserved space for kernel. * Set them to CANONICAL addresses. 
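* (Canonical here means the upper address bits are a sign-extension of bit 47, as x86-64 requires for virtual addresses.)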
@@ -425,7 +425,7 @@ int kfd_init_apertures(struct kfd_process *process) return -EINVAL; } - if (!dev->device_info->needs_iommu_device) { + if (!dev->use_iommu_v2) { /* dGPUs: the reserved space for kernel * before SVM */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 7c8786b9eb0aaad65571d876e138d45aad8a7f13..5a64915abaf7f6330aad2a049af570351a1b08a8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -41,7 +41,7 @@ int kfd_iommu_check_device(struct kfd_dev *kfd) struct amd_iommu_device_info iommu_info; int err; - if (!kfd->device_info->needs_iommu_device) + if (!kfd->use_iommu_v2) return -ENODEV; iommu_info.flags = 0; @@ -63,7 +63,7 @@ int kfd_iommu_device_init(struct kfd_dev *kfd) unsigned int pasid_limit; int err; - if (!kfd->device_info->needs_iommu_device) + if (!kfd->use_iommu_v2) return 0; iommu_info.flags = 0; @@ -109,7 +109,7 @@ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd) struct kfd_process *p = pdd->process; int err; - if (!dev->device_info->needs_iommu_device || pdd->bound == PDD_BOUND) + if (!dev->use_iommu_v2 || pdd->bound == PDD_BOUND) return 0; if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) { @@ -284,7 +284,7 @@ static void kfd_unbind_processes_from_device(struct kfd_dev *kfd) */ void kfd_iommu_suspend(struct kfd_dev *kfd) { - if (!kfd->device_info->needs_iommu_device) + if (!kfd->use_iommu_v2) return; kfd_unbind_processes_from_device(kfd); @@ -304,7 +304,7 @@ int kfd_iommu_resume(struct kfd_dev *kfd) unsigned int pasid_limit; int err; - if (!kfd->device_info->needs_iommu_device) + if (!kfd->use_iommu_v2) return 0; pasid_limit = kfd_get_pasid_limit(); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 6727e9de5b8b069fc8bdd0faa9da56ea064b227d..023629f284951ce2679cd7a5027b09c7aa9e5374 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -297,6 +297,9 @@ struct kfd_dev { bool pci_atomic_requested; + /* Use IOMMU v2 flag */ + bool use_iommu_v2; + /* SRAM ECC flag */ atomic_t sram_ecc_flag; @@ -309,6 +312,8 @@ struct kfd_dev { /* Clients watching SMI events */ struct list_head smi_clients; spinlock_t smi_lock; + + uint32_t reset_seq_num; }; enum kfd_mempool { @@ -892,6 +897,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd); int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); int kfd_numa_node_to_apic_id(int numa_node_id); +void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); /* Interrupts */ int kfd_interrupt_init(struct kfd_dev *dev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 40695d52e9a8df98256cd82d57c9c5b4c45bf513..a0e12a79ab7d92affaf984f81c3f4bfcc769b9c5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -87,7 +87,7 @@ struct kfd_sdma_activity_handler_workarea { }; struct temp_sdma_queue_list { - uint64_t rptr; + uint64_t __user *rptr; uint64_t sdma_val; unsigned int queue_id; struct list_head list; @@ -159,7 +159,7 @@ static void kfd_sdma_activity_worker(struct work_struct *work) } INIT_LIST_HEAD(&sdma_q->list); - sdma_q->rptr = (uint64_t)q->properties.read_ptr; + sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr; sdma_q->queue_id = q->properties.queue_id; list_add_tail(&sdma_q->list, &sdma_q_list.list); } @@ -218,7 +218,7 @@ static void 
kfd_sdma_activity_worker(struct work_struct *work) continue; list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) { - if (((uint64_t)q->properties.read_ptr == sdma_q->rptr) && + if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) && (sdma_q->queue_id == q->properties.queue_id)) { list_del(&sdma_q->list); kfree(sdma_q); @@ -270,6 +270,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, kfd_sdma_activity_worker); sdma_activity_work_handler.pdd = pdd; + sdma_activity_work_handler.sdma_activity_counter = 0; schedule_work(&sdma_activity_work_handler.sdma_activity_work); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index 7b348bf9df2140ed43a69a1d45aee2d1f2176123..17d1736367ea3e7ab9dddb123594434b57504b2e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -24,6 +24,7 @@ #include #include #include +#include "amdgpu.h" #include "amdgpu_vm.h" #include "kfd_priv.h" #include "kfd_smi_events.h" @@ -148,15 +149,94 @@ static int kfd_smi_ev_release(struct inode *inode, struct file *filep) return 0; } +static void add_event_to_kfifo(struct kfd_dev *dev, unsigned int smi_event, + char *event_msg, int len) +{ + struct kfd_smi_client *client; + + rcu_read_lock(); + + list_for_each_entry_rcu(client, &dev->smi_clients, list) { + if (!(READ_ONCE(client->events) & + KFD_SMI_EVENT_MASK_FROM_INDEX(smi_event))) + continue; + spin_lock(&client->lock); + if (kfifo_avail(&client->fifo) >= len) { + kfifo_in(&client->fifo, event_msg, len); + wake_up_all(&client->wait_queue); + } else { + pr_debug("smi_event(EventID: %u): no space left\n", + smi_event); + } + spin_unlock(&client->lock); + } + + rcu_read_unlock(); +} + +void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) +{ + /* + * GpuReset msg = Reset seq number (incremented for + * every reset message sent before GPU reset). 
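+ * Written as "%x %x\n" (event id, then reset seq number):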
+ * 1 byte event + 1 byte space + 8 bytes seq num + + * 1 byte \n + 1 byte \0 = 12 + */ + char fifo_in[12]; + int len; + unsigned int event; + + if (list_empty(&dev->smi_clients)) + return; + + memset(fifo_in, 0x0, sizeof(fifo_in)); + + if (post_reset) { + event = KFD_SMI_EVENT_GPU_POST_RESET; + } else { + event = KFD_SMI_EVENT_GPU_PRE_RESET; + ++(dev->reset_seq_num); + } + + len = snprintf(fifo_in, sizeof(fifo_in), "%x %x\n", event, + dev->reset_seq_num); + + add_event_to_kfifo(dev, event, fifo_in, len); +} + +void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, + uint32_t throttle_bitmask) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd; + /* + * ThermalThrottle msg = throttle_bitmask(8): + * thermal_interrupt_count(16): + * 1 byte event + 1 byte space + 8 bytes throttle_bitmask + + * 1 byte : + 16 bytes thermal_interrupt_counter + 1 byte \n + + * 1 byte \0 = 29 + */ + char fifo_in[29]; + int len; + + if (list_empty(&dev->smi_clients)) + return; + + len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%llx\n", + KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask, + atomic64_read(&adev->smu.throttle_int_counter)); + + add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len); +} + void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) { struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd; struct amdgpu_task_info task_info; /* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25 */ - /* 16 bytes event + 1 byte space + 25 bytes msg + 1 byte \n = 43 + /* 1 byte event + 1 byte space + 25 bytes msg + 1 byte \n + + * 1 byte \0 = 29 */ - char fifo_in[43]; - struct kfd_smi_client *client; + char fifo_in[29]; int len; if (list_empty(&dev->smi_clients)) @@ -168,25 +248,10 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) if (!task_info.pid) return; - len = snprintf(fifo_in, 43, "%x %x:%s\n", KFD_SMI_EVENT_VMFAULT, + len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%s\n", KFD_SMI_EVENT_VMFAULT, task_info.pid, task_info.task_name); - rcu_read_lock(); - - list_for_each_entry_rcu(client, &dev->smi_clients, list) { - if (!(READ_ONCE(client->events) & KFD_SMI_EVENT_VMFAULT)) - continue; - spin_lock(&client->lock); - if (kfifo_avail(&client->fifo) >= len) { - kfifo_in(&client->fifo, fifo_in, len); - wake_up_all(&client->wait_queue); - } - else - pr_debug("smi_event(vmfault): no space left\n"); - spin_unlock(&client->lock); - } - - rcu_read_unlock(); + add_event_to_kfifo(dev, KFD_SMI_EVENT_VMFAULT, fifo_in, len); } int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h index a9cb218fef96eaad15721aa35d9e85b60f8f1d46..b9b0438202e21cf92ac6a71e11c3e0da9f2d02b3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h @@ -25,5 +25,8 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd); void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid); +void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, + uint32_t throttle_bitmask); +void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset); #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index f185f6cbc05c66291f92aa123cd4ddb14ee4b3e3..2b31c3066aaaec6e426292cb865c6c6b9cb0bc25 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -446,7 +446,7 @@ static ssize_t
node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count", dev->node_props.cpu_cores_count); sysfs_show_32bit_prop(buffer, offs, "simd_count", - dev->node_props.simd_count); + dev->gpu ? dev->node_props.simd_count : 0); sysfs_show_32bit_prop(buffer, offs, "mem_banks_count", dev->node_props.mem_banks_count); sysfs_show_32bit_prop(buffer, offs, "caches_count", @@ -1139,7 +1139,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) /* Discrete GPUs need their own topology device list * entries. Don't assign them to CPU/APU nodes. */ - if (!gpu->device_info->needs_iommu_device && + if (!gpu->use_iommu_v2 && dev->node_props.cpu_cores_count) continue; @@ -1239,7 +1239,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) void *crat_image = NULL; size_t image_size = 0; int proximity_domain; - struct amdgpu_ras *ctx; + struct amdgpu_device *adev; INIT_LIST_HEAD(&temp_topology_device_list); @@ -1388,7 +1388,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) * Overwrite ATS capability according to needs_iommu_device to fix * potential missing corresponding bit in CRAT of BIOS. */ - if (dev->gpu->device_info->needs_iommu_device) + if (dev->gpu->use_iommu_v2) dev->node_props.capability |= HSA_CAP_ATS_PRESENT; else dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT; @@ -1404,19 +1404,17 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.max_waves_per_simd = 10; } - ctx = amdgpu_ras_get_context((struct amdgpu_device *)(dev->gpu->kgd)); - if (ctx) { - /* kfd only concerns sram ecc on GFX/SDMA and HBM ecc on UMC */ - dev->node_props.capability |= - (((ctx->features & BIT(AMDGPU_RAS_BLOCK__SDMA)) != 0) || - ((ctx->features & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0)) ? - HSA_CAP_SRAM_EDCSUPPORTED : 0; - dev->node_props.capability |= ((ctx->features & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ? - HSA_CAP_MEM_EDCSUPPORTED : 0; - - dev->node_props.capability |= (ctx->features != 0) ? + adev = (struct amdgpu_device *)(dev->gpu->kgd); + /* kfd only concerns sram ecc on GFX and HBM ecc on UMC */ + dev->node_props.capability |= + ((adev->ras_features & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ? + HSA_CAP_SRAM_EDCSUPPORTED : 0; + dev->node_props.capability |= ((adev->ras_features & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ? + HSA_CAP_MEM_EDCSUPPORTED : 0; + + if (adev->asic_type != CHIP_VEGA10) + dev->node_props.capability |= (adev->ras_features != 0) ? HSA_CAP_RASEVENTNOTIFY : 0; - } kfd_debug_print_topology(); @@ -1515,6 +1513,29 @@ int kfd_numa_node_to_apic_id(int numa_node_id) return kfd_cpumask_to_apic_id(cpumask_of_node(numa_node_id)); } +void kfd_double_confirm_iommu_support(struct kfd_dev *gpu) +{ + struct kfd_topology_device *dev; + + gpu->use_iommu_v2 = false; + + if (!gpu->device_info->needs_iommu_device) + return; + + down_read(&topology_lock); + + /* Only use IOMMUv2 if there is an APU topology node with no GPU + * assigned yet. This GPU will be assigned to it. 
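+ * dGPUs keep use_iommu_v2 == false and get their own topology node instead (see kfd_assign_gpu above).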
*/ + list_for_each_entry(dev, &topology_device_list, list) + if (dev->node_props.cpu_cores_count && + dev->node_props.simd_count && + !dev->gpu) + gpu->use_iommu_v2 = true; + + up_read(&topology_lock); +} + #if defined(CONFIG_DEBUG_FS) int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data) diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 34ae4f3a32f4ca580a7caa6a7515d137f054870e..f24abf428534e4f8bde0d760fa5a1e8cabfa18e0 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -6,7 +6,7 @@ config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y select SND_HDA_COMPONENT if SND_HDA_CORE - select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) + select DRM_AMD_DC_DCN if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON)) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) help Choose this option if you want to use the new display engine support for AMDGPU. This adds required support for Vega and @@ -31,6 +31,14 @@ config DRM_AMD_DC_HDCP help Choose this option if you want to support HDCP authentication. +config DRM_AMD_DC_SI + bool "AMD DC support for Southern Islands ASICs" + default n + help + Choose this option to enable new AMD DC support for SI ASICs + by default. This includes Tahiti, Pitcairn, Cape Verde and Oland. + Hainan is not supported by AMD DC as it has no physical DCE6. + config DEBUG_KERNEL_DC bool "Enable kgdb break in DC" depends on DRM_AMD_DC diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index fd96fafec5b88fc0dda9a1135cc4bb210725251d..ec29376667df2a578eb88a83c6718014c419f63d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -207,7 +207,7 @@ static void amdgpu_dm_set_psr_caps(struct dc_link *link); static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream); static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); - +static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm); /* * dm_vblank_get_counter @@ -304,7 +304,7 @@ static struct amdgpu_crtc * get_crtc_by_otg_inst(struct amdgpu_device *adev, int otg_inst) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_crtc *crtc; struct amdgpu_crtc *amdgpu_crtc; @@ -356,7 +356,7 @@ static void dm_pflip_high_irq(void *interrupt_params) return; } - spin_lock_irqsave(&adev->ddev->event_lock, flags); + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n", @@ -364,7 +364,7 @@ static void dm_pflip_high_irq(void *interrupt_params) AMDGPU_FLIP_SUBMITTED, amdgpu_crtc->crtc_id, amdgpu_crtc); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); return; } @@ -416,7 +416,7 @@ static void dm_pflip_high_irq(void *interrupt_params) e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base); e->pipe = amdgpu_crtc->crtc_id; - list_add_tail(&e->base.link, &adev->ddev->vblank_event_list); + list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list); e = NULL; } @@ -429,7 +429,7 @@ static void dm_pflip_high_irq(void *interrupt_params) amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base); amdgpu_crtc->pflip_status
= AMDGPU_FLIP_NONE; - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", amdgpu_crtc->crtc_id, amdgpu_crtc, @@ -465,7 +465,7 @@ static void dm_vupdate_high_irq(void *interrupt_params) /* BTR processing for pre-DCE12 ASICs */ if (acrtc_state->stream && adev->family < AMDGPU_FAMILY_AI) { - spin_lock_irqsave(&adev->ddev->event_lock, flags); + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); mod_freesync_handle_v_update( adev->dm.freesync_module, acrtc_state->stream, @@ -475,7 +475,7 @@ static void dm_vupdate_high_irq(void *interrupt_params) adev->dm.dc, acrtc_state->stream, &acrtc_state->vrr_params.adjust); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } } } @@ -525,7 +525,7 @@ static void dm_crtc_high_irq(void *interrupt_params) if (adev->family < AMDGPU_FAMILY_AI) return; - spin_lock_irqsave(&adev->ddev->event_lock, flags); + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); if (acrtc_state->stream && acrtc_state->vrr_params.supported && acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { @@ -558,7 +558,7 @@ static void dm_crtc_high_irq(void *interrupt_params) acrtc->pflip_status = AMDGPU_FLIP_NONE; } - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } static int dm_set_clockgating_state(void *handle, @@ -580,7 +580,7 @@ static int dm_early_init(void* handle); static void amdgpu_dm_fbc_init(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct dm_comressor_info *compressor = &adev->dm.compressor; struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); struct drm_display_mode *mode; @@ -622,7 +622,7 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, unsigned char *buf, int max_bytes) { struct drm_device *dev = dev_get_drvdata(kdev); - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct amdgpu_dm_connector *aconnector; @@ -661,7 +661,7 @@ static int amdgpu_dm_audio_component_bind(struct device *kdev, struct device *hda_kdev, void *data) { struct drm_device *dev = dev_get_drvdata(kdev); - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_audio_component *acomp = data; acomp->ops = &amdgpu_dm_audio_component_ops; @@ -675,7 +675,7 @@ static void amdgpu_dm_audio_component_unbind(struct device *kdev, struct device *hda_kdev, void *data) { struct drm_device *dev = dev_get_drvdata(kdev); - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct drm_audio_component *acomp = data; acomp->ops = NULL; @@ -886,7 +886,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) #endif int r; - adev->dm.ddev = adev->ddev; + adev->dm.ddev = adev_to_drm(adev); adev->dm.adev = adev; /* Zero all the fields */ @@ -1022,10 +1022,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) /* TODO: Add_display_info? 
*/ /* TODO use dynamic cursor width */ - adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; - adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; + adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; + adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; - if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) { + if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); goto error; @@ -1102,6 +1102,12 @@ static int load_dmcu_fw(struct amdgpu_device *adev) const struct dmcu_firmware_header_v1_0 *hdr; switch(adev->asic_type) { +#if defined(CONFIG_DRM_AMD_DC_SI) + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: +#endif case CHIP_BONAIRE: case CHIP_HAWAII: case CHIP_KAVERI: @@ -1420,7 +1426,7 @@ static int dm_late_init(void *handle) bool ret = true; if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw) - return detect_mst_link_for_all_connectors(adev->ddev); + return detect_mst_link_for_all_connectors(adev_to_drm(adev)); dmcu = adev->dm.dc->res_pool->dmcu; @@ -1445,12 +1451,12 @@ static int dm_late_init(void *handle) if (dmcu) ret = dmcu_load_iram(dmcu, params); else if (adev->dm.dc->ctx->dmub_srv) - ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params); + ret = dmub_init_abm_config(adev->dm.dc->res_pool, params); if (!ret) return -EINVAL; - return detect_mst_link_for_all_connectors(adev->ddev); + return detect_mst_link_for_all_connectors(adev_to_drm(adev)); } static void s3_handle_mst(struct drm_device *dev, bool suspend) @@ -1688,7 +1694,7 @@ static int dm_suspend(void *handle) struct amdgpu_display_manager *dm = &adev->dm; int ret = 0; - if (adev->in_gpu_reset) { + if (amdgpu_in_reset(adev)) { mutex_lock(&dm->dc_lock); dm->cached_dc_state = dc_copy_state(dm->dc->current_state); @@ -1702,9 +1708,9 @@ static int dm_suspend(void *handle) } WARN_ON(adev->dm.cached_state); - adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); + adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); - s3_handle_mst(adev->ddev, true); + s3_handle_mst(adev_to_drm(adev), true); amdgpu_dm_irq_suspend(adev); @@ -1858,7 +1864,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, static int dm_resume(void *handle) { struct amdgpu_device *adev = handle; - struct drm_device *ddev = adev->ddev; + struct drm_device *ddev = adev_to_drm(adev); struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; @@ -1874,7 +1880,7 @@ static int dm_resume(void *handle) struct dc_state *dc_state; int i, r, j; - if (adev->in_gpu_reset) { + if (amdgpu_in_reset(adev)) { dc_state = dm->cached_dc_state; r = dm_dmub_hw_init(adev); @@ -2080,7 +2086,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) return; conn_base = &aconnector->base; - adev = conn_base->dev->dev_private; + adev = drm_to_adev(conn_base->dev); dm = &adev->dm; caps = &dm->backlight_caps; caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; @@ -2231,6 +2237,7 @@ void amdgpu_dm_update_connector_after_detect( drm_connector_update_edid_property(connector, aconnector->edid); + drm_add_edid_modes(connector, aconnector->edid); if (aconnector->dc_link->aux_mode) drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, @@ -2269,7 +2276,7 @@ static void handle_hpd_irq(void *param) struct drm_device *dev = connector->dev; enum 
dc_connection_type new_connection_type = dc_connection_none; #ifdef CONFIG_DRM_AMD_DC_HDCP - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); #endif /* @@ -2402,7 +2409,7 @@ static void handle_hpd_rx_irq(void *param) enum dc_connection_type new_connection_type = dc_connection_none; #ifdef CONFIG_DRM_AMD_DC_HDCP union hpd_irq_data hpd_irq_data; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); #endif @@ -2473,7 +2480,7 @@ static void handle_hpd_rx_irq(void *param) static void register_hpd_handlers(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct amdgpu_dm_connector *aconnector; const struct dc_link *dc_link; @@ -2510,6 +2517,89 @@ static void register_hpd_handlers(struct amdgpu_device *adev) } } +#if defined(CONFIG_DRM_AMD_DC_SI) +/* Register IRQ sources and initialize IRQ callbacks */ +static int dce60_register_irq_handlers(struct amdgpu_device *adev) +{ + struct dc *dc = adev->dm.dc; + struct common_irq_params *c_irq_params; + struct dc_interrupt_params int_params = {0}; + int r; + int i; + unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; + + int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; + int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + + /* + * Actions of amdgpu_irq_add_id(): + * 1. Register a set() function with base driver. + * Base driver will call set() function to enable/disable an + * interrupt in DC hardware. + * 2. Register amdgpu_dm_irq_handler(). + * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts + * coming from DC hardware. + * amdgpu_dm_irq_handler() will re-direct the interrupt to DC + * for acknowledging and handling. 
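+ * On DCE 6 the mapping below is: crtc i uses interrupt source id i + 1, and the pageflip source ids run from VISLANDS30_IV_SRCID_D1_GRPH_PFLIP upward in steps of 2 per display.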
*/ + + /* Use VBLANK interrupt */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); + if (r) { + DRM_ERROR("Failed to add crtc irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i + 1, 0); + + c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_crtc_high_irq, c_irq_params); + } + + /* Use GRPH_PFLIP interrupt */ + for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; + i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { + r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); + if (r) { + DRM_ERROR("Failed to add page flip irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_pflip_high_irq, c_irq_params); + + } + + /* HPD */ + r = amdgpu_irq_add_id(adev, client_id, + VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); + if (r) { + DRM_ERROR("Failed to add hpd irq id!\n"); + return r; + } + + register_hpd_handlers(adev); + + return 0; +} +#endif + /* Register IRQ sources and initialize IRQ callbacks */ static int dce110_register_irq_handlers(struct amdgpu_device *adev) { @@ -2740,7 +2830,7 @@ static int dm_atomic_get_state(struct drm_atomic_state *state, struct dm_atomic_state **dm_state) { struct drm_device *dev = state->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_display_manager *dm = &adev->dm; struct drm_private_state *priv_state; @@ -2760,7 +2850,7 @@ static struct dm_atomic_state * dm_atomic_get_new_state(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_display_manager *dm = &adev->dm; struct drm_private_obj *obj; struct drm_private_state *new_obj_state; @@ -2774,24 +2864,6 @@ dm_atomic_get_new_state(struct drm_atomic_state *state) return NULL; } -static struct dm_atomic_state * -dm_atomic_get_old_state(struct drm_atomic_state *state) -{ - struct drm_device *dev = state->dev; - struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_display_manager *dm = &adev->dm; - struct drm_private_obj *obj; - struct drm_private_state *old_obj_state; - int i; - - for_each_old_private_obj_in_state(state, obj, old_obj_state, i) { - if (obj->funcs == dm->atomic_obj.funcs) - return to_dm_atomic_state(old_obj_state); - } - - return NULL; -} - static struct drm_private_state * dm_atomic_duplicate_state(struct drm_private_obj *obj) { @@ -2839,18 +2911,18 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) adev->mode_info.mode_config_initialized = true; - adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; - adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; + adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; + adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; - adev->ddev->mode_config.max_width = 16384; -
adev->ddev->mode_config.max_height = 16384; + adev_to_drm(adev)->mode_config.max_width = 16384; + adev_to_drm(adev)->mode_config.max_height = 16384; - adev->ddev->mode_config.preferred_depth = 24; - adev->ddev->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.preferred_depth = 24; + adev_to_drm(adev)->mode_config.prefer_shadow = 1; /* indicates support for immediate flip */ - adev->ddev->mode_config.async_page_flip = true; + adev_to_drm(adev)->mode_config.async_page_flip = true; - adev->ddev->mode_config.fb_base = adev->gmc.aper_base; + adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) @@ -2864,18 +2936,24 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) dc_resource_state_copy_construct_current(adev->dm.dc, state->context); - drm_atomic_private_obj_init(adev->ddev, + drm_atomic_private_obj_init(adev_to_drm(adev), &adev->dm.atomic_obj, &state->base, &dm_atomic_state_funcs); r = amdgpu_display_modeset_create_props(adev); - if (r) + if (r) { + dc_release_state(state->context); + kfree(state); return r; + } r = amdgpu_dm_audio_init(adev); - if (r) + if (r) { + dc_release_state(state->context); + kfree(state); return r; + } return 0; } @@ -2892,6 +2970,8 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm) #if defined(CONFIG_ACPI) struct amdgpu_dm_backlight_caps caps; + memset(&caps, 0, sizeof(caps)); + if (dm->backlight_caps.caps_valid) return; @@ -2930,51 +3010,50 @@ static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness) return rc ? 0 : 1; } -static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps, - const uint32_t user_brightness) +static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, + unsigned *min, unsigned *max) { - u32 min, max, conversion_pace; - u32 brightness = user_brightness; - if (!caps) - goto out; + return 0; - if (!caps->aux_support) { - max = caps->max_input_signal; - min = caps->min_input_signal; - /* - * The brightness input is in the range 0-255 - * It needs to be rescaled to be between the - * requested min and max input signal - * It also needs to be scaled up by 0x101 to - * match the DC interface which has a range of - * 0 to 0xffff - */ - conversion_pace = 0x101; - brightness = - user_brightness - * conversion_pace - * (max - min) - / AMDGPU_MAX_BL_LEVEL - + min * conversion_pace; + if (caps->aux_support) { + // Firmware limits are in nits, DC API wants millinits. + *max = 1000 * caps->aux_max_input_signal; + *min = 1000 * caps->aux_min_input_signal; } else { - /* TODO - * We are doing a linear interpolation here, which is OK but - * does not provide the optimal result. We probably want - * something close to the Perceptual Quantizer (PQ) curve. - */ - max = caps->aux_max_input_signal; - min = caps->aux_min_input_signal; - - brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min - + user_brightness * max; - // Multiple the value by 1000 since we use millinits - brightness *= 1000; - brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL); + // Firmware limits are 8-bit, PWM control is 16-bit. 
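+ // Scaling by 0x101 maps the 8-bit limit onto the full 16-bit range, e.g. 0xff * 0x101 = 0xffff.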
+ *max = 0x101 * caps->max_input_signal; + *min = 0x101 * caps->min_input_signal; } + return 1; +} + +static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, + uint32_t brightness) +{ + unsigned min, max; -out: - return brightness; + if (!get_brightness_range(caps, &min, &max)) + return brightness; + + // Rescale 0..255 to min..max + return min + DIV_ROUND_CLOSEST((max - min) * brightness, + AMDGPU_MAX_BL_LEVEL); +} + +static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, + uint32_t brightness) +{ + unsigned min, max; + + if (!get_brightness_range(caps, &min, &max)) + return brightness; + + if (brightness < min) + return 0; + // Rescale min..max to 0..255 + return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), + max - min); } static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) @@ -2990,7 +3069,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) link = (struct dc_link *)dm->backlight_link; - brightness = convert_brightness(&caps, bd->props.brightness); + brightness = convert_brightness_from_user(&caps, bd->props.brightness); // Change brightness based on AUX property if (caps.aux_support) return set_backlight_via_aux(link, brightness); @@ -3007,7 +3086,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) if (ret == DC_ERROR_UNEXPECTED) return bd->props.brightness; - return ret; + return convert_brightness_to_user(&dm->backlight_caps, ret); } static const struct backlight_ops amdgpu_dm_backlight_ops = { @@ -3029,13 +3108,13 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) props.type = BACKLIGHT_RAW; snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", - dm->adev->ddev->primary->index); + adev_to_drm(dm->adev)->primary->index); dm->backlight_dev = backlight_device_register(bl_name, - dm->adev->ddev->dev, - dm, - &amdgpu_dm_backlight_ops, - &props); + adev_to_drm(dm->adev)->dev, + dm, + &amdgpu_dm_backlight_ops, + &props); if (IS_ERR(dm->backlight_dev)) DRM_ERROR("DM: Backlight registration failed!\n"); @@ -3241,6 +3320,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) /* Software is initialized. Now we can register interrupt handlers. 
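* DCE 6.0 ASICs take the new dce60_register_irq_handlers() path below; everything else keeps its existing handler.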
*/ switch (adev->asic_type) { +#if defined(CONFIG_DRM_AMD_DC_SI) + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + if (dce60_register_irq_handlers(dm->adev)) { + DRM_ERROR("DM: Failed to initialize IRQ\n"); + goto fail; + } + break; +#endif case CHIP_BONAIRE: case CHIP_HAWAII: case CHIP_KAVERI: @@ -3341,14 +3431,14 @@ static ssize_t s3_debug_store(struct device *device, int ret; int s3_state; struct drm_device *drm_dev = dev_get_drvdata(device); - struct amdgpu_device *adev = drm_dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(drm_dev); ret = kstrtoint(buf, 0, &s3_state); if (ret == 0) { if (s3_state) { dm_resume(adev); - drm_kms_helper_hotplug_event(adev->ddev); + drm_kms_helper_hotplug_event(adev_to_drm(adev)); } else dm_suspend(adev); } @@ -3365,6 +3455,20 @@ static int dm_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; switch (adev->asic_type) { +#if defined(CONFIG_DRM_AMD_DC_SI) + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + break; + case CHIP_OLAND: + adev->mode_info.num_crtc = 2; + adev->mode_info.num_hpd = 2; + adev->mode_info.num_dig = 2; + break; +#endif case CHIP_BONAIRE: case CHIP_HAWAII: adev->mode_info.num_crtc = 6; @@ -3461,7 +3565,7 @@ static int dm_early_init(void *handle) */ #if defined(CONFIG_DEBUG_KERNEL_DC) device_create_file( - adev->ddev->dev, + adev_to_drm(adev)->dev, &dev_attr_s3_debug); #endif @@ -3472,21 +3576,12 @@ static bool modeset_required(struct drm_crtc_state *crtc_state, struct dc_stream_state *new_stream, struct dc_stream_state *old_stream) { - if (!drm_atomic_crtc_needs_modeset(crtc_state)) - return false; - - if (!crtc_state->enable) - return false; - - return crtc_state->active; + return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); } static bool modereset_required(struct drm_crtc_state *crtc_state) { - if (!drm_atomic_crtc_needs_modeset(crtc_state)) - return false; - - return !crtc_state->enable || !crtc_state->active; + return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); } static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) @@ -3559,8 +3654,17 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state, static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, uint64_t *tiling_flags, bool *tmz_surface) { - struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]); - int r = amdgpu_bo_reserve(rbo, false); + struct amdgpu_bo *rbo; + int r; + + if (!amdgpu_fb) { + *tiling_flags = 0; + *tmz_surface = false; + return 0; + } + + rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]); + r = amdgpu_bo_reserve(rbo, false); if (unlikely(r)) { /* Don't show error message when returning -ERESTARTSYS */ @@ -3983,13 +4087,10 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, struct drm_crtc_state *crtc_state) { struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); - const struct amdgpu_framebuffer *amdgpu_fb = - to_amdgpu_framebuffer(plane_state->fb); + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); struct dc_scaling_info scaling_info; struct dc_plane_info plane_info; - uint64_t tiling_flags; int ret; - bool tmz_surface = false; bool force_disable_dcc = false; ret = fill_dc_scaling_info(plane_state, &scaling_info); @@ -4001,15 +4102,12 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, dc_plane_state->clip_rect = 
scaling_info.clip_rect; dc_plane_state->scaling_quality = scaling_info.scaling_quality; - ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface); - if (ret) - return ret; - force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; - ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags, + ret = fill_dc_plane_info_and_addr(adev, plane_state, + dm_plane_state->tiling_flags, &plane_info, &dc_plane_state->address, - tmz_surface, + dm_plane_state->tmz_surface, force_disable_dcc); if (ret) return ret; @@ -4591,7 +4689,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, dc_link_get_link_cap(aconnector->dc_link)); #if defined(CONFIG_DRM_AMD_DC_DCN) - if (dsc_caps.is_dsc_supported) + if (dsc_caps.is_dsc_supported) { + /* Set DSC policy according to dsc_clock_en */ + dc_dsc_policy_set_enable_dsc_when_not_needed(aconnector->dsc_settings.dsc_clock_en); + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], &dsc_caps, aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, @@ -4599,6 +4700,21 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, &stream->timing, &stream->timing.dsc_cfg)) stream->timing.flags.DSC = 1; + /* Overwrite the stream flag if DSC is enabled through debugfs */ + if (aconnector->dsc_settings.dsc_clock_en) + stream->timing.flags.DSC = 1; + + if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_width) + stream->timing.dsc_cfg.num_slices_h = DIV_ROUND_UP(stream->timing.h_addressable, + aconnector->dsc_settings.dsc_slice_width); + + if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_height) + stream->timing.dsc_cfg.num_slices_v = DIV_ROUND_UP(stream->timing.v_addressable, + aconnector->dsc_settings.dsc_slice_height); + + if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) + stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; + } #endif } @@ -4612,7 +4728,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, update_stream_signal(stream, sink); if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) - mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false); + mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); + if (stream->link->psr_settings.psr_feature_enabled) { // // should decide stream support vsc sdp colorimetry capability @@ -4710,7 +4827,7 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) { enum dc_irq_source irq_source; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); int rc; irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; @@ -4726,7 +4843,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) { enum dc_irq_source irq_source; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); int rc = 0; @@ -4805,7 +4922,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, uint64_t val) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct dm_connector_state *dm_old_state = to_dm_connector_state(connector->state); struct dm_connector_state *dm_new_state = @@ -4860,7 +4977,7 @@ int amdgpu_dm_connector_atomic_get_property(struct 
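The debugfs DSC overrides folded into create_stream_for_sink() above turn a forced slice dimension into a slice count by dividing the addressable timing and rounding up. A toy calculation with assumed timing values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int h_addressable = 3840, v_addressable = 2160;

	/* Forcing a 1920-pixel slice width on a 3840-wide timing -> 2 slices. */
	printf("num_slices_h = %u\n", DIV_ROUND_UP(h_addressable, 1920u));
	/* A 1000-pixel request does not divide evenly and rounds up to 4. */
	printf("num_slices_h = %u\n", DIV_ROUND_UP(h_addressable, 1000u));
	/* Forcing a 1080-line slice height on a 2160-line timing -> 2 slices. */
	printf("num_slices_v = %u\n", DIV_ROUND_UP(v_addressable, 1080u));
	return 0;
}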
drm_connector *connector, uint64_t *val) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct dm_connector_state *dm_state = to_dm_connector_state(state); int ret = -EINVAL; @@ -4910,9 +5027,10 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); const struct dc_link *link = aconnector->dc_link; - struct amdgpu_device *adev = connector->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_display_manager *dm = &adev->dm; + drm_atomic_private_obj_fini(&aconnector->mst_mgr.base); #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) @@ -5095,7 +5213,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct dc_stream_state *old_stream) { struct drm_connector *connector = &aconnector->base; - struct amdgpu_device *adev = connector->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct dc_stream_state *stream; const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8; @@ -5372,7 +5490,7 @@ static void dm_update_crtc_active_planes(struct drm_crtc *crtc, static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dc *dc = adev->dm.dc; struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state); int ret = -EINVAL; @@ -5592,6 +5710,10 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane) dc_plane_state_retain(dm_plane_state->dc_state); } + /* Framebuffer hasn't been updated yet, so retain old flags. */ + dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags; + dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface; + return &dm_plane_state->base; } @@ -5626,14 +5748,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, struct list_head list; struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; - uint64_t tiling_flags; uint32_t domain; int r; - bool tmz_surface = false; - bool force_disable_dcc = false; - - dm_plane_state_old = to_dm_plane_state(plane->state); - dm_plane_state_new = to_dm_plane_state(new_state); if (!new_state->fb) { DRM_DEBUG_DRIVER("No FB bound\n"); @@ -5677,27 +5793,35 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, return r; } - amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); - - tmz_surface = amdgpu_bo_encrypted(rbo); - ttm_eu_backoff_reservation(&ticket, &list); afb->address = amdgpu_bo_gpu_offset(rbo); amdgpu_bo_ref(rbo); + /** + * We don't do surface updates on planes that have been newly created, + * but we also don't have the afb->address during atomic check. + * + * Fill in buffer attributes depending on the address here, but only on + * newly created planes since they're not being used by DC yet and this + * won't modify global state. 
+ */ + dm_plane_state_old = to_dm_plane_state(plane->state); + dm_plane_state_new = to_dm_plane_state(new_state); + if (dm_plane_state_new->dc_state && - dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { - struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; + dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { + struct dc_plane_state *plane_state = + dm_plane_state_new->dc_state; + bool force_disable_dcc = !plane_state->dcc.enable; - force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; fill_plane_buffer_attributes( adev, afb, plane_state->format, plane_state->rotation, - tiling_flags, &plane_state->tiling_info, - &plane_state->plane_size, &plane_state->dcc, - &plane_state->address, tmz_surface, - force_disable_dcc); + dm_plane_state_new->tiling_flags, + &plane_state->tiling_info, &plane_state->plane_size, + &plane_state->dcc, &plane_state->address, + dm_plane_state_new->tmz_surface, force_disable_dcc); } return 0; @@ -5738,7 +5862,7 @@ static int dm_plane_helper_check_state(struct drm_plane_state *state, static int dm_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { - struct amdgpu_device *adev = plane->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(plane->dev); struct dc *dc = adev->dm.dc; struct dm_plane_state *dm_plane_state; struct dc_scaling_info scaling_info; @@ -5907,7 +6031,7 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, num_formats = get_plane_formats(plane, plane_cap, formats, ARRAY_SIZE(formats)); - res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs, + res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, &dm_plane_funcs, formats, num_formats, NULL, plane->type, NULL); if (res) @@ -5941,8 +6065,9 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; - drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, - supported_rotations); + if (dm->adev->asic_type >= CHIP_BONAIRE) + drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, + supported_rotations); drm_plane_helper_add(plane, &dm_plane_helper_funcs); @@ -6212,7 +6337,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, struct dc_link *link, int link_index) { - struct amdgpu_device *adev = dm->ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dm->ddev); /* * Some of the properties below require access to state, like bpc. @@ -6463,7 +6588,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev, struct amdgpu_encoder *aencoder, uint32_t link_index) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int res = drm_encoder_init(dev, &aencoder->base, @@ -6648,7 +6773,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, static void handle_cursor_update(struct drm_plane *plane, struct drm_plane_state *old_plane_state) { - struct amdgpu_device *adev = plane->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(plane->dev); struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; struct dm_crtc_state *crtc_state = crtc ? 
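The tiling_flags/tmz_surface fields carried in dm_plane_state (see the duplicate_state hunk above and the amdgpu_dm.h hunk further down) follow the standard DRM pattern of subclassing drm_plane_state: the helper only duplicates the base struct, so private fields must be copied by hand. A condensed, hypothetical sketch of that pattern, not the driver's exact code:

#include <linux/slab.h>
#include <drm/drm_atomic_state_helper.h>

struct example_plane_state {
	struct drm_plane_state base;
	uint64_t tiling_flags;	/* cached at atomic-check time */
	bool tmz_surface;
};

static struct drm_plane_state *
example_plane_duplicate_state(struct drm_plane *plane)
{
	struct example_plane_state *old = container_of(plane->state,
			struct example_plane_state, base);
	struct example_plane_state *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
	/* Carry the cached buffer attributes until atomic check refreshes them. */
	new->tiling_flags = old->tiling_flags;
	new->tmz_surface = old->tmz_surface;
	return &new->base;
}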
to_dm_crtc_state(crtc->state) : NULL; @@ -6750,7 +6875,7 @@ static void update_freesync_state_on_stream( if (!new_stream->timing.h_total || !new_stream->timing.v_total) return; - spin_lock_irqsave(&adev->ddev->event_lock, flags); + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); vrr_params = new_crtc_state->vrr_params; if (surface) { @@ -6803,7 +6928,7 @@ static void update_freesync_state_on_stream( (int)new_crtc_state->base.vrr_enabled, (int)vrr_params.state); - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } static void pre_update_freesync_state_on_stream( @@ -6826,7 +6951,7 @@ static void pre_update_freesync_state_on_stream( if (!new_stream->timing.h_total || !new_stream->timing.v_total) return; - spin_lock_irqsave(&adev->ddev->event_lock, flags); + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); vrr_params = new_crtc_state->vrr_params; if (new_crtc_state->vrr_supported && @@ -6849,7 +6974,7 @@ static void pre_update_freesync_state_on_stream( sizeof(vrr_params.adjust)) != 0); new_crtc_state->vrr_params = vrr_params; - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, @@ -6919,8 +7044,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, long r; unsigned long flags; struct amdgpu_bo *abo; - uint64_t tiling_flags; - bool tmz_surface = false; uint32_t target_vblank, last_flip_vblank; bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); bool pflip_present = false; @@ -7004,28 +7127,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, if (unlikely(r <= 0)) DRM_ERROR("Waiting for fences timed out!"); - /* - * TODO This might fail and hence better not used, wait - * explicitly on fences instead - * and in general should be called for - * blocking commit to as per framework helpers - */ - r = amdgpu_bo_reserve(abo, true); - if (unlikely(r != 0)) - DRM_ERROR("failed to reserve buffer before flip\n"); - - amdgpu_bo_get_tiling_flags(abo, &tiling_flags); - - tmz_surface = amdgpu_bo_encrypted(abo); - - amdgpu_bo_unreserve(abo); - fill_dc_plane_info_and_addr( - dm->adev, new_plane_state, tiling_flags, + dm->adev, new_plane_state, + dm_new_plane_state->tiling_flags, &bundle->plane_infos[planes_count], &bundle->flip_addrs[planes_count].address, - tmz_surface, - false); + dm_new_plane_state->tmz_surface, false); DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n", new_plane_state->plane->index, @@ -7203,9 +7310,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * on some ASICs). 
*/ if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) - dm_update_pflip_irq_state( - (struct amdgpu_device *)dev->dev_private, - acrtc_attach); + dm_update_pflip_irq_state(drm_to_adev(dev), + acrtc_attach); if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && @@ -7235,7 +7341,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, static void amdgpu_dm_commit_audio(struct drm_device *dev, struct drm_atomic_state *state) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct drm_connector_state *old_con_state, *new_con_state; @@ -7327,7 +7433,7 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, { struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); int i; /* @@ -7374,7 +7480,7 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_display_manager *dm = &adev->dm; struct dm_atomic_state *dm_state; struct dc_state *dc_state = NULL, *dc_state_temp = NULL; @@ -7387,6 +7493,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct drm_connector_state *old_con_state, *new_con_state; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; int crtc_disable_count = 0; + bool mode_set_reset_required = false; drm_atomic_helper_update_legacy_modeset_state(dev, state); @@ -7463,19 +7570,21 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) acrtc->enabled = true; acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; + mode_set_reset_required = true; } else if (modereset_required(new_crtc_state)) { DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); /* i.e. 
reset mode */ - if (dm_old_crtc_state->stream) { - if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active) - amdgpu_dm_psr_disable(dm_old_crtc_state->stream); - + if (dm_old_crtc_state->stream) remove_stream(adev, acrtc, dm_old_crtc_state->stream); - } + mode_set_reset_required = true; } } /* for_each_crtc_in_state() */ if (dc_state) { + /* if there is a mode set or reset, disable eDP PSR */ + if (mode_set_reset_required) + amdgpu_dm_psr_disable_all(dm); + dm_enable_per_frame_crtc_master_sync(dc_state); mutex_lock(&dm->dc_lock); WARN_ON(!dc_commit_state(dm->dc, dc_state)); @@ -7677,7 +7786,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * send vblank event on all events not handled in flip and * mark consumed event for drm_atomic_helper_commit_hw_done */ - spin_lock_irqsave(&adev->ddev->event_lock, flags); + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->event) @@ -7685,7 +7794,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) new_crtc_state->event = NULL; } - spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); /* Signal HW programming completion */ drm_atomic_helper_commit_hw_done(state); @@ -7957,6 +8066,13 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, goto fail; } + /* + * TODO: Check VSDB bits to decide whether this should + * be enabled or not. + */ + new_stream->triggered_crtc_reset.enabled = + dm->force_timing_sync; + dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; ret = fill_hdr_info_packet(drm_new_conn_state, @@ -8076,8 +8192,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * We want to do dc stream updates that do not require a * full modeset below. */ - if (!(enable && aconnector && new_crtc_state->enable && - new_crtc_state->active)) + if (!(enable && aconnector && new_crtc_state->active)) return 0; /* * Given above conditions, the dc state cannot be NULL because: @@ -8168,6 +8283,8 @@ static bool should_reset_plane(struct drm_atomic_state *state, * TODO: Come up with a more elegant solution for this. */ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { + struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state; + if (other->type == DRM_PLANE_TYPE_CURSOR) continue; @@ -8178,9 +8295,45 @@ static bool should_reset_plane(struct drm_atomic_state *state, if (old_other_state->crtc != new_other_state->crtc) return true; - /* TODO: Remove this once we can handle fast format changes. */ - if (old_other_state->fb && new_other_state->fb && - old_other_state->fb->format != new_other_state->fb->format) + /* Src/dst size and scaling updates. */ + if (old_other_state->src_w != new_other_state->src_w || + old_other_state->src_h != new_other_state->src_h || + old_other_state->crtc_w != new_other_state->crtc_w || + old_other_state->crtc_h != new_other_state->crtc_h) + return true; + + /* Rotation / mirroring updates. */ + if (old_other_state->rotation != new_other_state->rotation) + return true; + + /* Blending updates. */ + if (old_other_state->pixel_blend_mode != + new_other_state->pixel_blend_mode) + return true; + + /* Alpha updates. */ + if (old_other_state->alpha != new_other_state->alpha) + return true; + + /* Colorspace changes.
*/ + if (old_other_state->color_range != new_other_state->color_range || + old_other_state->color_encoding != new_other_state->color_encoding) + return true; + + /* Framebuffer checks fall at the end. */ + if (!old_other_state->fb || !new_other_state->fb) + continue; + + /* Pixel format changes can require bandwidth updates. */ + if (old_other_state->fb->format != new_other_state->fb->format) + return true; + + old_dm_plane_state = to_dm_plane_state(old_other_state); + new_dm_plane_state = to_dm_plane_state(new_other_state); + + /* Tiling and DCC changes also require bandwidth updates. */ + if (old_dm_plane_state->tiling_flags != + new_dm_plane_state->tiling_flags) return true; } @@ -8260,8 +8413,7 @@ static int dm_update_plane_state(struct dc *dc, dm_old_plane_state->dc_state, dm_state->context)) { - ret = EINVAL; - return ret; + return -EINVAL; } @@ -8302,7 +8454,7 @@ static int dm_update_plane_state(struct dc *dc, plane->base.id, new_plane_crtc->base.id); ret = fill_dc_plane_attributes( - new_plane_crtc->dev->dev_private, + drm_to_adev(new_plane_crtc->dev), dc_new_plane_state, new_plane_state, new_crtc_state); @@ -8348,169 +8500,6 @@ static int dm_update_plane_state(struct dc *dc, return ret; } -static int -dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, - struct drm_atomic_state *state, - enum surface_update_type *out_type) -{ - struct dc *dc = dm->dc; - struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL; - int i, j, num_plane, ret = 0; - struct drm_plane_state *old_plane_state, *new_plane_state; - struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state; - struct drm_crtc *new_plane_crtc; - struct drm_plane *plane; - - struct drm_crtc *crtc; - struct drm_crtc_state *new_crtc_state, *old_crtc_state; - struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state; - struct dc_stream_status *status = NULL; - enum surface_update_type update_type = UPDATE_TYPE_FAST; - struct surface_info_bundle { - struct dc_surface_update surface_updates[MAX_SURFACES]; - struct dc_plane_info plane_infos[MAX_SURFACES]; - struct dc_scaling_info scaling_infos[MAX_SURFACES]; - struct dc_flip_addrs flip_addrs[MAX_SURFACES]; - struct dc_stream_update stream_update; - } *bundle; - - bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); - - if (!bundle) { - DRM_ERROR("Failed to allocate update bundle\n"); - /* Set type to FULL to avoid crashing in DC*/ - update_type = UPDATE_TYPE_FULL; - goto cleanup; - } - - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - - memset(bundle, 0, sizeof(struct surface_info_bundle)); - - new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); - old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); - num_plane = 0; - - if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) { - update_type = UPDATE_TYPE_FULL; - goto cleanup; - } - - if (!new_dm_crtc_state->stream) - continue; - - for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) { - const struct amdgpu_framebuffer *amdgpu_fb = - to_amdgpu_framebuffer(new_plane_state->fb); - struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane]; - struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane]; - struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane]; - uint64_t tiling_flags; - bool tmz_surface = false; - - new_plane_crtc = new_plane_state->crtc; - new_dm_plane_state = to_dm_plane_state(new_plane_state); - old_dm_plane_state = to_dm_plane_state(old_plane_state); - - if (plane->type == DRM_PLANE_TYPE_CURSOR) 
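One small but real fix in the dm_update_plane_state() hunk above: the function used to set a positive EINVAL ("ret = EINVAL") instead of returning -EINVAL. Kernel convention is negative errno, and a positive value escapes the usual error checks. Illustrative shape (the helper name here is hypothetical):

	int ret = some_validation_step();	/* used to yield +EINVAL (22) */

	if (ret < 0)	/* a positive 22 slips past this common check */
		return ret;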
- continue; - - if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) { - update_type = UPDATE_TYPE_FULL; - goto cleanup; - } - - if (crtc != new_plane_crtc) - continue; - - bundle->surface_updates[num_plane].surface = - new_dm_plane_state->dc_state; - - if (new_crtc_state->mode_changed) { - bundle->stream_update.dst = new_dm_crtc_state->stream->dst; - bundle->stream_update.src = new_dm_crtc_state->stream->src; - } - - if (new_crtc_state->color_mgmt_changed) { - bundle->surface_updates[num_plane].gamma = - new_dm_plane_state->dc_state->gamma_correction; - bundle->surface_updates[num_plane].in_transfer_func = - new_dm_plane_state->dc_state->in_transfer_func; - bundle->surface_updates[num_plane].gamut_remap_matrix = - &new_dm_plane_state->dc_state->gamut_remap_matrix; - bundle->stream_update.gamut_remap = - &new_dm_crtc_state->stream->gamut_remap_matrix; - bundle->stream_update.output_csc_transform = - &new_dm_crtc_state->stream->csc_color_matrix; - bundle->stream_update.out_transfer_func = - new_dm_crtc_state->stream->out_transfer_func; - } - - ret = fill_dc_scaling_info(new_plane_state, - scaling_info); - if (ret) - goto cleanup; - - bundle->surface_updates[num_plane].scaling_info = scaling_info; - - if (amdgpu_fb) { - ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface); - if (ret) - goto cleanup; - - ret = fill_dc_plane_info_and_addr( - dm->adev, new_plane_state, tiling_flags, - plane_info, - &flip_addr->address, tmz_surface, - false); - if (ret) - goto cleanup; - - bundle->surface_updates[num_plane].plane_info = plane_info; - bundle->surface_updates[num_plane].flip_addr = flip_addr; - } - - num_plane++; - } - - if (num_plane == 0) - continue; - - ret = dm_atomic_get_state(state, &dm_state); - if (ret) - goto cleanup; - - old_dm_state = dm_atomic_get_old_state(state); - if (!old_dm_state) { - ret = -EINVAL; - goto cleanup; - } - - status = dc_stream_get_status_from_state(old_dm_state->context, - new_dm_crtc_state->stream); - bundle->stream_update.stream = new_dm_crtc_state->stream; - /* - * TODO: DC modifies the surface during this call so we need - * to lock here - find a way to do this without locking. - */ - mutex_lock(&dm->dc_lock); - update_type = dc_check_update_surfaces_for_stream( - dc, bundle->surface_updates, num_plane, - &bundle->stream_update, status); - mutex_unlock(&dm->dc_lock); - - if (update_type > UPDATE_TYPE_MED) { - update_type = UPDATE_TYPE_FULL; - goto cleanup; - } - } - -cleanup: - kfree(bundle); - - *out_type = update_type; - return ret; -} #if defined(CONFIG_DRM_AMD_DC_DCN) static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) { @@ -8551,8 +8540,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm * acquired. For full updates case which removes/adds/updates streams on one * CRTC while flipping on another CRTC, acquiring global lock will guarantee * that any such full update commit will wait for completion of any outstanding - * flip using DRMs synchronization events. See - * dm_determine_update_type_for_commit() + * flip using DRMs synchronization events. * * Note that DM adds the affected connectors for all CRTCs in state, when that * might not seem necessary. 
This is because DC stream creation requires the @@ -8564,7 +8552,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct dm_atomic_state *dm_state = NULL; struct dc *dc = adev->dm.dc; struct drm_connector *connector; @@ -8573,15 +8561,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; - enum surface_update_type update_type = UPDATE_TYPE_FAST; - enum surface_update_type overall_update_type = UPDATE_TYPE_FAST; enum dc_status status; int ret, i; - - /* - * This bool will be set for true for any modeset/reset - * or plane update which implies non fast surface update. - */ bool lock_and_validation_needed = false; ret = drm_atomic_helper_check_modeset(dev, state); @@ -8676,6 +8657,17 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } } + /* Prepass for updating tiling flags on new planes. */ + for_each_new_plane_in_state(state, plane, new_plane_state, i) { + struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state); + struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb); + + ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags, + &new_dm_plane_state->tmz_surface); + if (ret) + goto fail; + } + /* Remove exiting planes if they are modified */ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { ret = dm_update_plane_state(dc, state, plane, @@ -8764,27 +8756,23 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) continue; - overall_update_type = UPDATE_TYPE_FULL; lock_and_validation_needed = true; } - ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type); - if (ret) - goto fail; - - if (overall_update_type < update_type) - overall_update_type = update_type; - - /* - * lock_and_validation_needed was an old way to determine if we need to set - * the global lock. Leaving it in to check if we broke any corner cases - * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED - * lock_and_validation_needed false = UPDATE_TYPE_FAST + /** + * Streams and planes are reset when there are changes that affect + * bandwidth. Anything that affects bandwidth needs to go through + * DC global validation to ensure that the configuration can be applied + * to hardware. + * + * We have to currently stall out here in atomic_check for outstanding + * commits to finish in this case because our IRQ handlers reference + * DRM state directly - we can end up disabling interrupts too early + * if we don't. + * + * TODO: Remove this stall and drop DM state private objects. 
*/ - if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST) - WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL"); - - if (overall_update_type > UPDATE_TYPE_FAST) { + if (lock_and_validation_needed) { ret = dm_atomic_get_state(state, &dm_state); if (ret) goto fail; @@ -8866,7 +8854,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - dm_new_crtc_state->update_type = (int)overall_update_type; + dm_new_crtc_state->update_type = lock_and_validation_needed ? + UPDATE_TYPE_FULL : + UPDATE_TYPE_FAST; } /* Must be success */ @@ -8915,7 +8905,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct dm_connector_state *dm_con_state = NULL; struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); bool freesync_capable = false; if (!connector->state) { @@ -9114,3 +9104,34 @@ static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) return dc_link_set_psr_allow_active(stream->link, false, true); } + +/* + * amdgpu_dm_psr_disable_all() - disable psr f/w + * if psr is enabled on any stream + * + * Return: true if success + */ +static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm) +{ + DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n"); + return dc_set_psr_allow_active(dm->dc, false); +} + +void amdgpu_dm_trigger_timing_sync(struct drm_device *dev) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc *dc = adev->dm.dc; + int i; + + mutex_lock(&adev->dm.dc_lock); + if (dc->current_state) { + for (i = 0; i < dc->current_state->stream_count; ++i) + dc->current_state->streams[i] + ->triggered_crtc_reset.enabled = + adev->dm.force_timing_sync; + + dm_enable_per_frame_crtc_master_sync(dc->current_state); + dc_trigger_sync(dc, dc->current_state); + } + mutex_unlock(&adev->dm.dc_lock); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index dd1559c743c2c437cdd04d98046c20874d358dfe..a7856ae2e5f5026f3542a59fe1c1edfa5bab88ac 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -340,6 +340,14 @@ struct amdgpu_display_manager { * fake encoders used for DP MST.
*/ struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC]; + bool force_timing_sync; +}; + +struct dsc_preferred_settings { + bool dsc_clock_en; + uint32_t dsc_slice_width; + uint32_t dsc_slice_height; + uint32_t dsc_bits_per_pixel; }; struct amdgpu_dm_connector { @@ -389,6 +397,7 @@ struct amdgpu_dm_connector { uint32_t debugfs_dpcd_size; #endif bool force_yuv420_output; + struct dsc_preferred_settings dsc_settings; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) @@ -403,6 +412,8 @@ struct dc_plane_state; struct dm_plane_state { struct drm_plane_state base; struct dc_plane_state *dc_state; + uint64_t tiling_flags; + bool tmz_surface; }; struct dm_crtc_state { @@ -485,6 +496,8 @@ void dm_restore_drm_connector_state(struct drm_device *dev, void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct edid *edid); +void amdgpu_dm_trigger_timing_sync(struct drm_device *dev); + #define MAX_COLOR_LUT_ENTRIES 4096 /* Legacy gamm LUT users such as X doesn't like large LUT sizes */ #define MAX_COLOR_LEGACY_LUT_ENTRIES 256 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index b321ff654df4238a7453fbb33c293b449d0151d9..5df05f0d18bc9cca6b81e08c29cc46378441bf5c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -308,8 +308,7 @@ static int __set_input_tf(struct dc_transfer_func *func, int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) { struct dc_stream_state *stream = crtc->stream; - struct amdgpu_device *adev = - (struct amdgpu_device *)crtc->base.state->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev); bool has_rom = adev->asic_type <= CHIP_RAVEN; struct drm_color_ctm *ctm = NULL; const struct drm_color_lut *degamma_lut, *regamma_lut; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index eaad9099bc0b4bfe91fa56f82d675acd993cdb95..d0699e98db9292c59940b79972868181d1cbb900 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -101,7 +101,7 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, struct dm_crtc_state *dm_crtc_state, enum amdgpu_dm_pipe_crc_source source) { - struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dc_stream_state *stream_state = dm_crtc_state->stream; bool enable = amdgpu_dm_is_valid_crc_source(source); int ret = 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index e5a6d91159498cc50a75c90744ed064474dc7cf8..94fcb086154c958e298cbc4910a23a62ac809691 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -49,6 +49,10 @@ struct dmub_debugfs_trace_entry { uint32_t param1; }; +static inline const char *yesno(bool v) +{ + return v ? 
"yes" : "no"; +} /* parse_write_buffer_into_params - Helper function to parse debugfs write buffer into an array * @@ -980,6 +984,161 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf, return read_size - r; } +/* function: Read link's DSC & FEC capabilities + * + * + * Access it with the following command (you need to specify + * connector like DP-1): + * + * cat /sys/kernel/debug/dri/0/DP-X/dp_dsc_fec_support + * + */ +static int dp_dsc_fec_support_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct drm_modeset_acquire_ctx ctx; + struct drm_device *dev = connector->dev; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + int ret = 0; + bool try_again = false; + bool is_fec_supported = false; + bool is_dsc_supported = false; + struct dpcd_caps dpcd_caps; + + drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); + do { + try_again = false; + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); + if (ret) { + if (ret == -EDEADLK) { + ret = drm_modeset_backoff(&ctx); + if (!ret) { + try_again = true; + continue; + } + } + break; + } + if (connector->status != connector_status_connected) { + ret = -ENODEV; + break; + } + dpcd_caps = aconnector->dc_link->dpcd_caps; + if (aconnector->port) { + /* aconnector sets dsc_aux during get_modes call + * if MST connector has it means it can either + * enable DSC on the sink device or on MST branch + * its connected to. + */ + if (aconnector->dsc_aux) { + is_fec_supported = true; + is_dsc_supported = true; + } + } else { + is_fec_supported = dpcd_caps.fec_cap.raw & 0x1; + is_dsc_supported = dpcd_caps.dsc_caps.dsc_basic_caps.raw[0] & 0x1; + } + } while (try_again); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + seq_printf(m, "FEC_Sink_Support: %s\n", yesno(is_fec_supported)); + seq_printf(m, "DSC_Sink_Support: %s\n", yesno(is_dsc_supported)); + + return ret; +} + +/* function: Trigger virtual HPD redetection on connector + * + * This function will perform link rediscovery, link disable + * and enable, and dm connector state update. 
+ * + * Retrigger HPD on an existing connector by echoing 1 into + * its respective "trigger_hotplug" debugfs entry: + * + * echo 1 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug + * + */ +static ssize_t dp_trigger_hotplug(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + enum dc_connection_type new_connection_type = dc_connection_none; + char *wr_buf = NULL; + uint32_t wr_buf_size = 42; + int max_param_num = 1; + long param[1] = {0}; + uint8_t param_nums = 0; + + if (!aconnector || !aconnector->dc_link) + return -EINVAL; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + + if (!wr_buf) { + DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); + return -ENOSPC; + } + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + &param_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + DRM_DEBUG_DRIVER("user data not be read\n"); + kfree(wr_buf); + return -EINVAL; + } + + if (param[0] == 1) { + mutex_lock(&aconnector->hpd_lock); + + if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type) && + new_connection_type != dc_connection_none) + goto unlock; + + if (!dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) + goto unlock; + + amdgpu_dm_update_connector_after_detect(aconnector); + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); + + drm_kms_helper_hotplug_event(dev); + +unlock: + mutex_unlock(&aconnector->hpd_lock); + } + + kfree(wr_buf); + return size; +} + +/* function: read DSC status on the connector + * + * The read function: dp_dsc_clock_en_read + * returns current status of DSC clock on the connector. + * The return is a boolean flag: 1 or 0. + * + * Access it with the following command (you need to specify + * connector like DP-1): + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_clock_en + * + * Expected output: + * 1 - means that DSC is currently enabled + * 0 - means that DSC is disabled + */ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1037,6 +1196,95 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, return result; } +/* function: write force DSC on the connector + * + * The write function: dp_dsc_clock_en_write + * allows the user to force DSC on the connector.
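dp_dsc_fec_support_show() earlier uses the standard DRM acquire-context dance so it can take connection_mutex without deadlocking against concurrent commits. The generic shape of that loop, reduced to its essentials:

#include <drm/drm_modeset_lock.h>

/* Sketch of the deadlock-avoiding lock loop; what is read under the
 * lock is a placeholder for whatever state the caller needs. */
static int locked_read(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
	if (ret == -EDEADLK) {
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	/* ... read connector/link state under the lock ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}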
+ * The user can either force enable DSC + * on the next modeset or return to the driver default. + * + * Writing DSC settings is done with the following command: + * - To force enable DSC (you need to specify + * connector like DP-1): + * + * echo 0x1 > /sys/kernel/debug/dri/0/DP-X/dsc_clock_en + * + * - To return to default state set the flag to zero and + * let driver deal with DSC automatically + * (you need to specify connector like DP-1): + * + * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_clock_en + * + */ +static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct pipe_ctx *pipe_ctx; + int i; + char *wr_buf = NULL; + uint32_t wr_buf_size = 42; + int max_param_num = 1; + long param[1] = {0}; + uint8_t param_nums = 0; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + + if (!wr_buf) { + DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); + return -ENOSPC; + } + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + &param_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + DRM_DEBUG_DRIVER("user data not be read\n"); + kfree(wr_buf); + return -EINVAL; + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + if (!pipe_ctx || !pipe_ctx->stream) + goto done; + + aconnector->dsc_settings.dsc_clock_en = param[0]; + +done: + kfree(wr_buf); + return size; +} + +/* function: read DSC slice width parameter on the connector + * + * The read function: dp_dsc_slice_width_read + * returns dsc slice width used in the current configuration + * The return is an integer: 0 or other positive number + * + * Access the status with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_width + * + * 0 - means that DSC is disabled + * + * Any other number more than zero represents the + * slice width currently used by DSC in pixels + * + */ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1094,6 +1342,98 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, return result; } +/* function: write DSC slice width parameter + * + * The write function: dp_dsc_slice_width_write + * overwrites automatically generated DSC configuration + * of slice width. + * + * The slice width written has to divide the + * picture width evenly. + * + * Also the user has to write width in hexadecimal + * rather than in decimal.
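All five DSC write handlers in this patch share the same scaffolding and differ only in which dsc_settings field they store. A hypothetical condensed sketch of the shared shape (parse_write_buffer_into_params is the file-local helper described earlier; the real handlers also scan current_state for a pipe_ctx driving this link before accepting the value):

static ssize_t dp_dsc_param_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
	uint32_t wr_buf_size = 42;	/* same fixed size as above */
	long param[1] = {0};
	uint8_t param_nums = 0;
	char *wr_buf;

	if (size == 0)
		return -EINVAL;

	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
	if (!wr_buf)
		return -ENOSPC;

	if (parse_write_buffer_into_params(wr_buf, wr_buf_size, param, buf,
					   1, &param_nums) ||
	    param_nums <= 0) {
		kfree(wr_buf);
		return -EINVAL;
	}

	/* Stash the value for the next modeset; the field varies per file. */
	aconnector->dsc_settings.dsc_clock_en = param[0];

	kfree(wr_buf);
	return size;
}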
+ * + * Writing DSC settings is done with the following command: + * - To force overwrite slice width: (example sets to 1920 pixels) + * + * echo 0x780 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_width + * + * - To stop overwriting and let driver find the optimal size, + * set the width to zero: + * + * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_width + * + */ +static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct pipe_ctx *pipe_ctx; + int i; + char *wr_buf = NULL; + uint32_t wr_buf_size = 42; + int max_param_num = 1; + long param[1] = {0}; + uint8_t param_nums = 0; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + + if (!wr_buf) { + DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); + return -ENOSPC; + } + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + &param_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + DRM_DEBUG_DRIVER("user data not be read\n"); + kfree(wr_buf); + return -EINVAL; + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + if (!pipe_ctx || !pipe_ctx->stream) + goto done; + + aconnector->dsc_settings.dsc_slice_width = param[0]; + +done: + kfree(wr_buf); + return size; +} + +/* function: read DSC slice height parameter on the connector + * + * The read function: dp_dsc_slice_height_read + * returns dsc slice height used in the current configuration + * The return is an integer: 0 or other positive number + * + * Access the status with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_height + * + * 0 - means that DSC is disabled + * + * Any other number more than zero represents the + * slice height currently used by DSC in pixels + * + */ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1151,6 +1491,94 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, return result; } +/* function: write DSC slice height parameter + * + * The write function: dp_dsc_slice_height_write + * overwrites automatically generated DSC configuration + * of slice height. + * + * The slice height written has to divide the + * picture height evenly. + * + * Also the user has to write height in hexadecimal + * rather than in decimal.
+ * + * Writing DSC settings is done with the following command: + * - To force overwrite slice height (example sets to 128 pixels): + * + * echo 0x80 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_height + * + * - To stop overwriting and let driver find the optimal size, + * set the height to zero: + * + * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_height + * + */ +static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct pipe_ctx *pipe_ctx; + int i; + char *wr_buf = NULL; + uint32_t wr_buf_size = 42; + int max_param_num = 1; + uint8_t param_nums = 0; + long param[1] = {0}; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + + if (!wr_buf) { + DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); + return -ENOSPC; + } + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + &param_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + DRM_DEBUG_DRIVER("user data not be read\n"); + kfree(wr_buf); + return -EINVAL; + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + if (!pipe_ctx || !pipe_ctx->stream) + goto done; + + aconnector->dsc_settings.dsc_slice_height = param[0]; + +done: + kfree(wr_buf); + return size; +} + +/* function: read DSC target rate on the connector in bits per pixel + * + * The read function: dp_dsc_bits_per_pixel_read + * returns target rate of compression in bits per pixel + * The return is an integer: 0 or other positive integer + * + * Access it with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel + * + * 0 - means that DSC is disabled + */ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1208,6 +1636,94 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, return result; } +/* function: write DSC target rate in bits per pixel + * + * The write function: dp_dsc_bits_per_pixel_write + * overwrites automatically generated DSC configuration + * of DSC target bit rate. + * + * Also the user has to write bpp in hexadecimal + * rather than in decimal.
+ * + * Writing DSC settings is done with the following command: + * - To force overwrite rate (example sets to 256 bpp x 1/16): + * + * echo 0x100 > /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel + * + * - To stop overwriting and let driver find the optimal rate, + * set the rate to zero: + * + * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel + * + */ +static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct pipe_ctx *pipe_ctx; + int i; + char *wr_buf = NULL; + uint32_t wr_buf_size = 42; + int max_param_num = 1; + uint8_t param_nums = 0; + long param[1] = {0}; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + + if (!wr_buf) { + DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); + return -ENOSPC; + } + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + &param_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + DRM_DEBUG_DRIVER("user data not be read\n"); + kfree(wr_buf); + return -EINVAL; + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + if (!pipe_ctx || !pipe_ctx->stream) + goto done; + + aconnector->dsc_settings.dsc_bits_per_pixel = param[0]; + +done: + kfree(wr_buf); + return size; +} + +/* function: read DSC picture width parameter on the connector + * + * The read function: dp_dsc_pic_width_read + * returns dsc picture width used in the current configuration + * It is the same as h_addressable of the current + * display's timing + * The return is an integer: 0 or other positive integer + * If 0 then DSC is disabled. + * + * Access it with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_pic_width + * + * 0 - means that DSC is disabled + */ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1322,6 +1838,21 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, return result; } +/* function: read DSC chunk size parameter on the connector + * + * The read function: dp_dsc_chunk_size_read + * returns dsc chunk size set in the current configuration + * The value is calculated automatically by DSC code + * and depends on slice parameters and bpp target rate + * The return is an integer: 0 or other positive integer + * If 0 then DSC is disabled. + * + * Access it with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_chunk_size + * + * 0 - means that DSC is disabled + */ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1379,6 +1910,21 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, return result; } +/* function: read DSC slice bpg offset on the connector + * + * The read function: dp_dsc_slice_bpg_offset_read + * returns dsc bpg slice offset set in the current configuration + * The value is calculated automatically by DSC code + * and depends on slice parameters and bpp target rate + * The return is an integer: 0 or other positive integer + * If 0 then DSC is disabled.
+ * + * Access it with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_bpg_offset + * + * 0 - means that DSC is disabled + */ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1436,6 +1982,7 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, return result; } +DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support); DEFINE_SHOW_ATTRIBUTE(dmub_fw_state); DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); DEFINE_SHOW_ATTRIBUTE(output_bpc); @@ -1446,24 +1993,28 @@ DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability); static const struct file_operations dp_dsc_clock_en_debugfs_fops = { .owner = THIS_MODULE, .read = dp_dsc_clock_en_read, + .write = dp_dsc_clock_en_write, .llseek = default_llseek }; static const struct file_operations dp_dsc_slice_width_debugfs_fops = { .owner = THIS_MODULE, .read = dp_dsc_slice_width_read, + .write = dp_dsc_slice_width_write, .llseek = default_llseek }; static const struct file_operations dp_dsc_slice_height_debugfs_fops = { .owner = THIS_MODULE, .read = dp_dsc_slice_height_read, + .write = dp_dsc_slice_height_write, .llseek = default_llseek }; static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = { .owner = THIS_MODULE, .read = dp_dsc_bits_per_pixel_read, + .write = dp_dsc_bits_per_pixel_write, .llseek = default_llseek }; @@ -1491,6 +2042,12 @@ static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = { .llseek = default_llseek }; +static const struct file_operations dp_trigger_hotplug_debugfs_fops = { + .owner = THIS_MODULE, + .write = dp_trigger_hotplug, + .llseek = default_llseek +}; + static const struct file_operations dp_link_settings_debugfs_fops = { .owner = THIS_MODULE, .read = dp_link_settings_read, @@ -1557,7 +2114,8 @@ static const struct { {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops}, {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops}, {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops}, - {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops} + {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops}, + {"dp_dsc_fec_support", &dp_dsc_fec_support_fops} }; #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -1721,7 +2279,7 @@ static int current_backlight_read(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_display_manager *dm = &adev->dm; unsigned int backlight = dc_link_get_backlight_level(dm->backlight_link); @@ -1739,7 +2297,7 @@ static int target_backlight_read(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_display_manager *dm = &adev->dm; unsigned int backlight = dc_link_get_target_backlight_pwm(dm->backlight_link); @@ -1777,6 +2335,38 @@ static const struct drm_info_list amdgpu_dm_debugfs_list[] = { {"amdgpu_mst_topology", &mst_topo}, }; +/* + * Sets the force_timing_sync debug option from the given string. + * All connected displays will be force synchronized immediately.
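The force_timing_sync knob that follows is wired up with DEFINE_DEBUGFS_ATTRIBUTE plus debugfs_create_file_unsafe, the recommended pairing for simple u64 get/set attributes since the debugfs core then handles file lifetime via debugfs_file_get/put. A minimal standalone shape of the pattern (all names here are illustrative):

#include <linux/debugfs.h>

static bool example_flag;

static int example_get(void *data, u64 *val)
{
	*val = *(bool *)data;
	return 0;
}

static int example_set(void *data, u64 val)
{
	*(bool *)data = !!val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

/* Registration, e.g. from a debugfs init function: */
static void example_debugfs_init(struct dentry *root)
{
	debugfs_create_file_unsafe("example_flag", 0644, root,
				   &example_flag, &example_fops);
}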
+ * Usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync + */ +static int force_timing_sync_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + adev->dm.force_timing_sync = (bool)val; + + amdgpu_dm_trigger_timing_sync(adev_to_drm(adev)); + + return 0; +} + +/* + * Gets the force_timing_sync debug option value into the given buffer. + * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync + */ +static int force_timing_sync_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.force_timing_sync; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get, + force_timing_sync_set, "%llu\n"); + /* * Sets the DC visual confirm debug option from the given string. * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm @@ -1815,7 +2405,7 @@ int dtn_debugfs_init(struct amdgpu_device *adev) .llseek = default_llseek }; - struct drm_minor *minor = adev->ddev->primary; + struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *root = minor->debugfs_root; int ret; @@ -1836,5 +2426,8 @@ int dtn_debugfs_init(struct amdgpu_device *adev) debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root, adev, &dmub_fw_state_fops); + debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root, + adev, &force_timing_sync_ops); + return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index cbcf504f73a51a5e5195ab09d359a91e1846efa3..357778556b06ccf5ab04a5ab930bf7924e8788cd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -719,7 +719,7 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) */ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; @@ -755,7 +755,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) */ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index a61a294caebe1d1a92a4e27893703fa6a1984bc2..adbb44822e9449626ae120c4364497d5f8646694 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -68,7 +68,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); - if (payload.write) + if (payload.write && result >= 0) result = msg->size; if (result < 0) @@ -242,7 +242,7 @@ dm_mst_atomic_best_encoder(struct drm_connector *connector, struct drm_connector_state *connector_state) { struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); return &adev->dm.mst_encoders[acrtc->crtc_id].base; @@ -311,7 +311,7 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { void dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev) { - struct drm_device *dev = adev->ddev; + struct drm_device *dev = adev_to_drm(adev); int i; for (i 
= 0; i < adev->dm.display_indexes_num; i++) { @@ -338,7 +338,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, { struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); struct drm_device *dev = master->base.dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; int i; @@ -427,7 +427,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, aconnector->mst_mgr.cbs = &dm_mst_cbs; drm_dp_mst_topology_mgr_init( &aconnector->mst_mgr, - dm->adev->ddev, + adev_to_drm(dm->adev), &aconnector->dm_dp_aux.aux, 16, 4, @@ -453,6 +453,10 @@ struct dsc_mst_fairness_params { struct dc_dsc_bw_range bw_range; bool compression_possible; struct drm_dp_mst_port *port; + bool clock_overwrite; + uint32_t slice_width_overwrite; + uint32_t slice_height_overwrite; + uint32_t bpp_overwrite; }; struct dsc_mst_fairness_vars { @@ -486,7 +490,21 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p params[i].timing, ¶ms[i].timing->dsc_cfg)) { params[i].timing->flags.DSC = 1; - params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16; + + if (params[i].bpp_overwrite) + params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite; + else + params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16; + + if (params[i].slice_width_overwrite) + params[i].timing->dsc_cfg.num_slices_h = DIV_ROUND_UP( + params[i].timing->h_addressable, + params[i].slice_width_overwrite); + + if (params[i].slice_height_overwrite) + params[i].timing->dsc_cfg.num_slices_v = DIV_ROUND_UP( + params[i].timing->v_addressable, + params[i].slice_height_overwrite); } else { params[i].timing->flags.DSC = 0; } @@ -618,7 +636,9 @@ static void try_disable_dsc(struct drm_atomic_state *state, int remaining_to_try = 0; for (i = 0; i < count; i++) { - if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) { + if (vars[i].dsc_enabled + && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16 + && !params[i].clock_overwrite) { kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; tried[i] = false; remaining_to_try += 1; @@ -679,6 +699,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, struct dsc_mst_fairness_vars vars[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; int count = 0; + bool debugfs_overwrite = false; memset(params, 0, sizeof(params)); @@ -697,6 +718,12 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].sink = stream->sink; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; params[count].port = aconnector->port; + params[count].clock_overwrite = aconnector->dsc_settings.dsc_clock_en; + if (params[count].clock_overwrite) + debugfs_overwrite = true; + params[count].slice_width_overwrite = aconnector->dsc_settings.dsc_slice_width; + params[count].slice_height_overwrite = aconnector->dsc_settings.dsc_slice_height; + params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel; params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy); if (!dc_dsc_compute_bandwidth_range( @@ -722,7 +749,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, dm_mst_get_pbn_divider(dc_link)) < 0) return false; } - if (!drm_dp_mst_atomic_check(state)) { + if 
(!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { set_dsc_configs_from_fairness_vars(params, vars, count); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index e0f4f1be16182701bc68864dbe1219dbb86cae23..047b1e2dd8f11e623bd0ec7dde5680ca3a94bc93 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -43,6 +43,10 @@ DC_LIBS += dce110 DC_LIBS += dce100 DC_LIBS += dce80 +ifdef CONFIG_DRM_AMD_DC_SI +DC_LIBS += dce60 +endif + ifdef CONFIG_DRM_AMD_DC_HDCP DC_LIBS += hdcp endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/Makefile b/drivers/gpu/drm/amd/display/dc/bios/Makefile index 239e86bbec5a17e318f7d82bad3d607bcbcd43d3..ed6b5e9763f6441f312ad604446fb2ad8a19f8ac 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/Makefile +++ b/drivers/gpu/drm/amd/display/dc/bios/Makefile @@ -31,6 +31,15 @@ AMD_DAL_BIOS = $(addprefix $(AMDDALPATH)/dc/bios/,$(BIOS)) AMD_DISPLAY_FILES += $(AMD_DAL_BIOS) +############################################################################### +# DCE 6x +############################################################################### +# All DCE6.x are derived from DCE6.0, so 6.0 MUST be defined if ANY of +# DCE6.x is compiled. +ifdef CONFIG_DRM_AMD_DC_SI +AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce60/command_table_helper_dce60.o +endif + ############################################################################### # DCE 8x ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 078b7e34418550ab1910074399b1a82a63c51656..2d5c7daaee231c7b8c242a1382e170fbd06fff4e 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1108,6 +1108,18 @@ static enum bp_result bios_parser_enable_disp_power_gating( action); } +static enum bp_result bios_parser_enable_lvtma_control( + struct dc_bios *dcb, + uint8_t uc_pwr_on) +{ + struct bios_parser *bp = BP_FROM_DCB(dcb); + + if (!bp->cmd_tbl.enable_lvtma_control) + return BP_RESULT_FAILURE; + + return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on); +} + static bool bios_parser_is_accelerated_mode( struct dc_bios *dcb) { @@ -2208,7 +2220,9 @@ static const struct dc_vbios_funcs vbios_funcs = { .get_board_layout_info = bios_get_board_layout_info, .pack_data_tables = bios_parser_pack_data_tables, - .get_atom_dc_golden_table = bios_get_atom_dc_golden_table + .get_atom_dc_golden_table = bios_get_atom_dc_golden_table, + + .enable_lvtma_control = bios_parser_enable_lvtma_control }; static bool bios_parser2_construct( diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 5815983caaf80b01e3cbd595969f41e5dda077ee..070459e3e4070dcb31fa5b678673f1e07e386231 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -1877,9 +1877,7 @@ static enum bp_result set_crtc_using_dtd_timing_v3( * but it is 4 either from Edid data (spec CEA 861) * or CEA timing table. 
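+ * (le16_add_cpu() in the replacement below performs this same +1 as a
+ * single read-modify-write on the little-endian field, instead of the
+ * open-coded cpu_to_le16(le16_to_cpu(...) + 1) round trip.)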
*/ - params.usV_SyncOffset = - cpu_to_le16(le16_to_cpu(params.usV_SyncOffset) + 1); - + le16_add_cpu(&params.usV_SyncOffset, 1); } } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index bed91572f82a6a3bd44ab338c8ad76c2d294147e..25bdf1c38e0ad8ffd905b736fdb911ce1e42d97b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -569,10 +569,7 @@ static enum bp_result set_crtc_using_dtd_timing_v3( * but it is 4 either from Edid data (spec CEA 861) * or CEA timing table. */ - params.v_syncoffset = - cpu_to_le16(le16_to_cpu(params.v_syncoffset) + - 1); - + le16_add_cpu(&params.v_syncoffset, 1); } } @@ -904,6 +901,61 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id) return 0; } +/****************************************************************************** + ****************************************************************************** + ** + ** LVTMA CONTROL + ** + ****************************************************************************** + *****************************************************************************/ + +static enum bp_result enable_lvtma_control( + struct bios_parser *bp, + uint8_t uc_pwr_on); + +static void init_enable_lvtma_control(struct bios_parser *bp) +{ + /* TODO: add switch for table version */ + bp->cmd_tbl.enable_lvtma_control = enable_lvtma_control; + +} + +static void enable_lvtma_control_dmcub( + struct dc_dmub_srv *dmcub, + uint8_t uc_pwr_on) +{ + + union dmub_rb_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__VBIOS; + cmd.cmd_common.header.sub_type = + DMUB_CMD__VBIOS_LVTMA_CONTROL; + cmd.cmd_common.cmd_buffer[0] = + uc_pwr_on; + + dc_dmub_srv_cmd_queue(dmcub, &cmd); + dc_dmub_srv_cmd_execute(dmcub); + dc_dmub_srv_wait_idle(dmcub); + +} + +static enum bp_result enable_lvtma_control( + struct bios_parser *bp, + uint8_t uc_pwr_on) +{ + enum bp_result result = BP_RESULT_FAILURE; + + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv, + uc_pwr_on); + return BP_RESULT_OK; + } + return result; +} + void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp) { init_dig_encoder_control(bp); @@ -919,4 +971,5 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp) init_set_dce_clock(bp); init_get_smu_clock_info(bp); + init_enable_lvtma_control(bp); } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h index 7a2af24dfe605cd752b0139f652d3061ee825d55..7bdce013cde5ab781a58b2fa832968a5ab6a1b7e 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h @@ -94,7 +94,8 @@ struct cmd_tbl { struct bp_set_dce_clock_parameters *bp_params); unsigned int (*get_smu_clock_info)( struct bios_parser *bp, uint8_t id); - + enum bp_result (*enable_lvtma_control)(struct bios_parser *bp, + uint8_t uc_pwr_on); }; void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp); diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c index 253bbb1eea6095bfa3e576d6e9b25016bf83c62e..48b4ef03fc8f83f7720c1abe7e19ab3344224801 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c @@ -36,6 +36,14 @@ bool 
dal_bios_parser_init_cmd_tbl_helper( enum dce_version dce) { switch (dce) { +#if defined(CONFIG_DRM_AMD_DC_SI) + case DCE_VERSION_6_0: + case DCE_VERSION_6_1: + case DCE_VERSION_6_4: + *h = dal_cmd_tbl_helper_dce60_get_table(); + return true; +#endif + case DCE_VERSION_8_0: case DCE_VERSION_8_1: case DCE_VERSION_8_3: diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h index 4c3789df253df300a7e9597a48ac5271c4cca754..dfd30aaf403269ef6688d9335bd4ba4430963c61 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h @@ -26,6 +26,9 @@ #ifndef __DAL_COMMAND_TABLE_HELPER_H__ #define __DAL_COMMAND_TABLE_HELPER_H__ +#if defined(CONFIG_DRM_AMD_DC_SI) +#include "dce60/command_table_helper_dce60.h" +#endif #include "dce80/command_table_helper_dce80.h" #include "dce110/command_table_helper_dce110.h" #include "dce112/command_table_helper_dce112.h" diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index 21ff6b686f5f817b5c8ba7bce0e2b080b5ec806a..74c498b6774d151e3055534828f0bc9c45ec50ed 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -37,6 +37,14 @@ bool dal_bios_parser_init_cmd_tbl_helper2( enum dce_version dce) { switch (dce) { +#if defined(CONFIG_DRM_AMD_DC_SI) + case DCE_VERSION_6_0: + case DCE_VERSION_6_1: + case DCE_VERSION_6_4: + *h = dal_cmd_tbl_helper_dce60_get_table(); + return true; +#endif + case DCE_VERSION_8_0: case DCE_VERSION_8_1: case DCE_VERSION_8_3: diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h index 785fcb20a1b933538d4978582b8fd7cc33533f3e..66e0a3e737681427d8995b0394e7dc66efb8586f 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h @@ -26,6 +26,9 @@ #ifndef __DAL_COMMAND_TABLE_HELPER2_H__ #define __DAL_COMMAND_TABLE_HELPER2_H__ +#if defined(CONFIG_DRM_AMD_DC_SI) +#include "dce60/command_table_helper_dce60.h" +#endif #include "dce80/command_table_helper_dce80.h" #include "dce110/command_table_helper_dce110.h" #include "dce112/command_table_helper2_dce112.h" diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c new file mode 100644 index 0000000000000000000000000000000000000000..710221b4f5c5e5ea0795bb3f6940e6d0965ec195 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c @@ -0,0 +1,354 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "atom.h" + +#include "include/grph_object_id.h" +#include "include/grph_object_defs.h" +#include "include/bios_parser_types.h" + +#include "../command_table_helper.h" + +static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action) +{ + uint8_t atom_action = 0; + + switch (action) { + case ENCODER_CONTROL_ENABLE: + atom_action = ATOM_ENABLE; + break; + case ENCODER_CONTROL_DISABLE: + atom_action = ATOM_DISABLE; + break; + case ENCODER_CONTROL_SETUP: + atom_action = ATOM_ENCODER_CMD_SETUP; + break; + case ENCODER_CONTROL_INIT: + atom_action = ATOM_ENCODER_INIT; + break; + default: + BREAK_TO_DEBUGGER(); /* Unhandle action in driver.!! */ + break; + } + + return atom_action; +} + +static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id) +{ + bool result = false; + + if (atom_engine_id != NULL) + switch (id) { + case ENGINE_ID_DIGA: + *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID; + result = true; + break; + case ENGINE_ID_DIGB: + *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID; + result = true; + break; + case ENGINE_ID_DIGC: + *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID; + result = true; + break; + case ENGINE_ID_DIGD: + *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID; + result = true; + break; + case ENGINE_ID_DIGE: + *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID; + result = true; + break; + case ENGINE_ID_DIGF: + *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID; + result = true; + break; + case ENGINE_ID_DIGG: + *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID; + result = true; + break; + case ENGINE_ID_DACA: + *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID; + result = true; + break; + default: + break; + } + + return result; +} + +static bool clock_source_id_to_atom( + enum clock_source_id id, + uint32_t *atom_pll_id) +{ + bool result = true; + + if (atom_pll_id != NULL) + switch (id) { + case CLOCK_SOURCE_ID_PLL0: + *atom_pll_id = ATOM_PPLL0; + break; + case CLOCK_SOURCE_ID_PLL1: + *atom_pll_id = ATOM_PPLL1; + break; + case CLOCK_SOURCE_ID_PLL2: + *atom_pll_id = ATOM_PPLL2; + break; + case CLOCK_SOURCE_ID_EXTERNAL: + *atom_pll_id = ATOM_PPLL_INVALID; + break; + case CLOCK_SOURCE_ID_DFS: + *atom_pll_id = ATOM_EXT_PLL1; + break; + case CLOCK_SOURCE_ID_VCE: + /* for VCE encoding, + * we need to pass in ATOM_PPLL_INVALID + */ + *atom_pll_id = ATOM_PPLL_INVALID; + break; + case CLOCK_SOURCE_ID_DP_DTO: + /* When programming DP DTO PLL ID should be invalid */ + *atom_pll_id = ATOM_PPLL_INVALID; + break; + case CLOCK_SOURCE_ID_UNDEFINED: + BREAK_TO_DEBUGGER(); /* check when this will happen! 
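+ * (the helper still stores ATOM_PPLL_INVALID but returns false,
+ * so the caller can reject an undefined clock source instead of
+ * programming it)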
*/ + *atom_pll_id = ATOM_PPLL_INVALID; + result = false; + break; + default: + result = false; + break; + } + + return result; +} + +static uint8_t clock_source_id_to_atom_phy_clk_src_id( + enum clock_source_id id) +{ + uint8_t atom_phy_clk_src_id = 0; + + switch (id) { + case CLOCK_SOURCE_ID_PLL0: + atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL; + break; + case CLOCK_SOURCE_ID_PLL1: + atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL; + break; + case CLOCK_SOURCE_ID_PLL2: + atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL; + break; + case CLOCK_SOURCE_ID_EXTERNAL: + atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT; + break; + default: + atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL; + break; + } + + return atom_phy_clk_src_id >> 2; +} + +static uint8_t signal_type_to_atom_dig_mode(enum signal_type s) +{ + uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP; + + switch (s) { + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_EDP: + atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP; + break; + case SIGNAL_TYPE_LVDS: + atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS; + break; + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI; + break; + case SIGNAL_TYPE_HDMI_TYPE_A: + atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI; + break; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST; + break; + default: + atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI; + break; + } + + return atom_dig_mode; +} + +static uint8_t hpd_sel_to_atom(enum hpd_source_id id) +{ + uint8_t atom_hpd_sel = 0; + + switch (id) { + case HPD_SOURCEID1: + atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL; + break; + case HPD_SOURCEID2: + atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL; + break; + case HPD_SOURCEID3: + atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL; + break; + case HPD_SOURCEID4: + atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL; + break; + case HPD_SOURCEID5: + atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL; + break; + case HPD_SOURCEID6: + atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL; + break; + case HPD_SOURCEID_UNKNOWN: + default: + atom_hpd_sel = 0; + break; + } + return atom_hpd_sel >> 4; +} + +static uint8_t dig_encoder_sel_to_atom(enum engine_id id) +{ + uint8_t atom_dig_encoder_sel = 0; + + switch (id) { + case ENGINE_ID_DIGA: + atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL; + break; + case ENGINE_ID_DIGB: + atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL; + break; + case ENGINE_ID_DIGC: + atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL; + break; + case ENGINE_ID_DIGD: + atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL; + break; + case ENGINE_ID_DIGE: + atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL; + break; + case ENGINE_ID_DIGF: + atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL; + break; + case ENGINE_ID_DIGG: + atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL; + break; + default: + atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL; + break; + } + + return atom_dig_encoder_sel; +} + +static uint8_t phy_id_to_atom(enum transmitter t) +{ + uint8_t atom_phy_id; + + switch (t) { + case TRANSMITTER_UNIPHY_A: + atom_phy_id = ATOM_PHY_ID_UNIPHYA; + break; + case TRANSMITTER_UNIPHY_B: + atom_phy_id = ATOM_PHY_ID_UNIPHYB; + break; + case TRANSMITTER_UNIPHY_C: + atom_phy_id = ATOM_PHY_ID_UNIPHYC; + break; + case TRANSMITTER_UNIPHY_D: + atom_phy_id = ATOM_PHY_ID_UNIPHYD; + break; + case 
TRANSMITTER_UNIPHY_E: + atom_phy_id = ATOM_PHY_ID_UNIPHYE; + break; + case TRANSMITTER_UNIPHY_F: + atom_phy_id = ATOM_PHY_ID_UNIPHYF; + break; + case TRANSMITTER_UNIPHY_G: + atom_phy_id = ATOM_PHY_ID_UNIPHYG; + break; + default: + atom_phy_id = ATOM_PHY_ID_UNIPHYA; + break; + } + return atom_phy_id; +} + +static uint8_t disp_power_gating_action_to_atom( + enum bp_pipe_control_action action) +{ + uint8_t atom_pipe_action = 0; + + switch (action) { + case ASIC_PIPE_DISABLE: + atom_pipe_action = ATOM_DISABLE; + break; + case ASIC_PIPE_ENABLE: + atom_pipe_action = ATOM_ENABLE; + break; + case ASIC_PIPE_INIT: + atom_pipe_action = ATOM_INIT; + break; + default: + BREAK_TO_DEBUGGER(); /* Unhandle action in driver! */ + break; + } + + return atom_pipe_action; +} + +static const struct command_table_helper command_table_helper_funcs = { + .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom, + .encoder_action_to_atom = encoder_action_to_atom, + .engine_bp_to_atom = engine_bp_to_atom, + .clock_source_id_to_atom = clock_source_id_to_atom, + .clock_source_id_to_atom_phy_clk_src_id = + clock_source_id_to_atom_phy_clk_src_id, + .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode, + .hpd_sel_to_atom = hpd_sel_to_atom, + .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom, + .phy_id_to_atom = phy_id_to_atom, + .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom, + .assign_control_parameter = + dal_cmd_table_helper_assign_control_parameter, + .clock_source_id_to_ref_clk_src = + dal_cmd_table_helper_clock_source_id_to_ref_clk_src, + .transmitter_bp_to_atom = dal_cmd_table_helper_transmitter_bp_to_atom, + .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom, + .encoder_mode_bp_to_atom = + dal_cmd_table_helper_encoder_mode_bp_to_atom, +}; + +const struct command_table_helper *dal_cmd_tbl_helper_dce60_get_table(void) +{ + return &command_table_helper_funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.h b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.h new file mode 100644 index 0000000000000000000000000000000000000000..f733be553d5a78f213fec4755703b91952eb731e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.h @@ -0,0 +1,33 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_COMMAND_TABLE_HELPER_DCE60_H__ +#define __DAL_COMMAND_TABLE_HELPER_DCE60_H__ + +struct command_table_helper; + +const struct command_table_helper *dal_cmd_tbl_helper_dce60_get_table(void); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile index 4674aca8f20697f2550e91e2d29b06f128b4a7e1..64f515d74410315c9ef4adb2fcb0e5cec50b0f3b 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile +++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile @@ -33,6 +33,10 @@ ifdef CONFIG_PPC64 calcs_ccflags := -mhard-float -maltivec endif +ifdef CONFIG_ARM64 +calcs_rcflags := -mgeneral-regs-only +endif + ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 @@ -53,6 +57,9 @@ endif CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags) CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags) CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare +CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_rcflags) BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index 6874276bb2a10dfdda4328f6a9676c779bd4a540..1a495759a034350cc81cabe03c6a661ac764ef65 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -30,6 +30,17 @@ AMD_DAL_CLK_MGR = $(addprefix $(AMDDALPATH)/dc/clk_mgr/,$(CLK_MGR)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR) +ifdef CONFIG_DRM_AMD_DC_SI +############################################################################### +# DCE 60 +############################################################################### +CLK_MGR_DCE60 = dce60_clk_mgr.o + +AMD_DAL_CLK_MGR_DCE60 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce60/,$(CLK_MGR_DCE60)) + +AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE60) +endif + ############################################################################### # DCE 100 and DCE8x ############################################################################### @@ -93,6 +104,13 @@ ifdef CONFIG_PPC64 CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute) endif +# prevent build errors: +# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types +# this file is unused on arm64, just like on ppc64 +ifdef CONFIG_ARM64 +CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := -mgeneral-regs-only +endif + AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21)) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 6a345d43028ca82fd5a8a510744631d18645356f..efb909ef7a0f732ca6e905cc5efc3575c150562f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -34,6 +34,7 @@ #include "dce110/dce110_clk_mgr.h" #include "dce112/dce112_clk_mgr.h" #include "dce120/dce120_clk_mgr.h" +#include "dce60/dce60_clk_mgr.h" #include "dcn10/rv1_clk_mgr.h" #include "dcn10/rv2_clk_mgr.h" #include "dcn20/dcn20_clk_mgr.h" @@ -123,6 +124,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p } switch (asic_id.chip_family) { +#if 
defined(CONFIG_DRM_AMD_DC_SI) + case FAMILY_SI: + dce60_clk_mgr_construct(ctx, clk_mgr); + break; +#endif case FAMILY_CI: case FAMILY_KV: dce_clk_mgr_construct(ctx, clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c new file mode 100644 index 0000000000000000000000000000000000000000..c11c6b3a787d21b2554bca144f6c9136fb9b9c55 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c @@ -0,0 +1,174 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "dccg.h" +#include "clk_mgr_internal.h" +#include "dce100/dce_clk_mgr.h" +#include "dce110/dce110_clk_mgr.h" +#include "dce60_clk_mgr.h" +#include "reg_helper.h" +#include "dmcu.h" +#include "core_types.h" +#include "dal_asic_id.h" + +/* + * Currently the register shifts and masks in this file are used for dce60 + * which has no DPREFCLK_CNTL register + * TODO: remove this when DENTIST_DISPCLK_CNTL + * is moved to dccg, where it belongs + */ +#include "dce/dce_6_0_d.h" +#include "dce/dce_6_0_sh_mask.h" + +#define REG(reg) \ + (clk_mgr->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name + +/* set register offset */ +#define SR(reg_name)\ + .reg_name = mm ## reg_name + +static const struct clk_mgr_registers disp_clk_regs = { + CLK_COMMON_REG_LIST_DCE60_BASE() +}; + +static const struct clk_mgr_shift disp_clk_shift = { + CLK_COMMON_MASK_SH_LIST_DCE60_COMMON_BASE(__SHIFT) +}; + +static const struct clk_mgr_mask disp_clk_mask = { + CLK_COMMON_MASK_SH_LIST_DCE60_COMMON_BASE(_MASK) +}; + + +/* Max clock values for each state indexed by "enum clocks_state": */ +static const struct state_dependent_clocks dce60_max_clks_by_state[] = { +/* ClocksStateInvalid - should not be used */ +{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, +/* ClocksStateUltraLow - not expected to be used for DCE 6.0 */ +{ .display_clk_khz = 0, .pixel_clk_khz = 0 }, +/* ClocksStateLow */ +{ .display_clk_khz = 352000, .pixel_clk_khz = 330000}, +/* ClocksStateNominal */ +{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 }, +/* ClocksStatePerformance */ +{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; + +int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + int dprefclk_wdivider; + int 
dp_ref_clk_khz; + int target_div; + + /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */ + + /* Read the mmDENTIST_DISPCLK_CNTL to get the currently + * programmed DID DENTIST_DPREFCLK_WDIVIDER*/ + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); + + /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/ + target_div = dentist_get_divider_from_did(dprefclk_wdivider); + + /* Calculate the current DFS clock, in kHz.*/ + dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR + * clk_mgr->base.dentist_vco_freq_khz) / target_div; + + return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz); +} + +static void dce60_pplib_apply_display_requirements( + struct dc *dc, + struct dc_state *context) +{ + struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; + + pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); + + dce110_fill_display_configs(context, pp_display_cfg); + + if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) + dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); +} + +static void dce60_update_clocks(struct clk_mgr *clk_mgr_base, + struct dc_state *context, + bool safe_to_lower) +{ + struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dm_pp_power_level_change_request level_change_req; + int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; + + /*TODO: W/A for dal3 linux, investigate why this works */ + if (!clk_mgr_dce->dfs_bypass_active) + patched_disp_clk = patched_disp_clk * 115 / 100; + + level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context); + /* get max clock state from PPLIB */ + if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) + || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { + if (dm_pp_apply_power_level_change_request(clk_mgr_base->ctx, &level_change_req)) + clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; + } + + if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) { + patched_disp_clk = dce_set_clock(clk_mgr_base, patched_disp_clk); + clk_mgr_base->clks.dispclk_khz = patched_disp_clk; + } + dce60_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context); +} + + + + + + + + +static struct clk_mgr_funcs dce60_funcs = { + .get_dp_ref_clk_frequency = dce60_get_dp_ref_freq_khz, + .update_clocks = dce60_update_clocks +}; + +void dce60_clk_mgr_construct( + struct dc_context *ctx, + struct clk_mgr_internal *clk_mgr) +{ + dce_clk_mgr_construct(ctx, clk_mgr); + + memcpy(clk_mgr->max_clks_by_state, + dce60_max_clks_by_state, + sizeof(dce60_max_clks_by_state)); + + clk_mgr->regs = &disp_clk_regs; + clk_mgr->clk_mgr_shift = &disp_clk_shift; + clk_mgr->clk_mgr_mask = &disp_clk_mask; + clk_mgr->base.funcs = &dce60_funcs; +} + diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.h new file mode 100644 index 0000000000000000000000000000000000000000..eca3e5168089b72262b0d97e83dff6a90b835e55 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.h @@ -0,0 +1,36 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, 
modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef DAL_DC_DCE_DCE60_CLK_MGR_H_ +#define DAL_DC_DCE_DCE60_CLK_MGR_H_ + +#include "dc.h" + +void dce60_clk_mgr_construct( + struct dc_context *ctx, + struct clk_mgr_internal *clk_mgr_dce); + +#endif /* DAL_DC_DCE_DCE60_CLK_MGR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index c664404a75d4ff13c4bdd1cd84042f990a645e16..543afa34d87aaa21fbb8a5cd497b3df091eccc34 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -94,6 +94,15 @@ int rn_get_active_display_cnt_wa( return display_count; } +void rn_set_low_power_state(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER); + /* update power state */ + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; +} + void rn_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) @@ -516,6 +525,7 @@ static struct clk_mgr_funcs dcn21_funcs = { .init_clocks = rn_init_clocks, .enable_pme_wa = rn_enable_pme_wa, .are_clock_states_equal = rn_are_clock_states_equal, + .set_low_power_state = rn_set_low_power_state, .notify_wm_ranges = rn_notify_wm_ranges, .notify_link_rate_change = rn_notify_link_rate_change, }; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 9133646f6d5f0b29714d55d6c4ef37828531e8cc..b0e9b0509568c5d817f80627d7a1cf0fd6de662b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -554,8 +554,7 @@ void dcn3_clk_mgr_construct( void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr) { - if (clk_mgr->base.bw_params) - kfree(clk_mgr->base.bw_params); + kfree(clk_mgr->base.bw_params); if (clk_mgr->wm_range_table) dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 92eb1ca1634fcc7f32d4ab8606a556680e5ca9e6..dc463d99ef50395e2b21778e9e779da09ade3dd9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1238,6 +1238,14 @@ bool dc_enable_stereo( return ret; } +void dc_trigger_sync(struct dc *dc, struct dc_state *context) +{ + if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { + enable_timing_multisync(dc, context); + program_timing_sync(dc, context); + } +} + /* * Applies given context to HW and copy it into 
current context. * It's up to the user to release the src context afterwards. @@ -1297,10 +1305,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c if (result != DC_OK) return result; - if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { - enable_timing_multisync(dc, context); - program_timing_sync(dc, context); - } + dc_trigger_sync(dc, context); /* Program all planes within new context*/ if (dc->hwss.program_front_end_for_ctx) { @@ -2295,6 +2300,7 @@ static void commit_planes_for_stream(struct dc *dc, enum surface_update_type update_type, struct dc_state *context) { + bool mpcc_disconnected = false; int i, j; struct pipe_ctx *top_pipe_to_program = NULL; @@ -2325,6 +2331,15 @@ static void commit_planes_for_stream(struct dc *dc, context_clock_trace(dc, context); } + if (update_type != UPDATE_TYPE_FAST && dc->hwss.interdependent_update_lock && + dc->hwss.disconnect_pipes && dc->hwss.wait_for_pending_cleared){ + dc->hwss.interdependent_update_lock(dc, context, true); + mpcc_disconnected = dc->hwss.disconnect_pipes(dc, context); + dc->hwss.interdependent_update_lock(dc, context, false); + if (mpcc_disconnected) + dc->hwss.wait_for_pending_cleared(dc, context); + } + for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; @@ -2621,7 +2636,7 @@ void dc_commit_updates_for_stream(struct dc *dc, copy_stream_update_to_stream(dc, context, stream, stream_update); - if (update_type > UPDATE_TYPE_FAST) { + if (update_type >= UPDATE_TYPE_FULL) { if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { DC_ERROR("Mode validation failed for stream update!\n"); dc_release_state(context); @@ -2933,6 +2948,30 @@ void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_ dc->hwss.get_clock(dc, clock_type, clock_cfg); } +/* enable/disable eDP PSR without specify stream for eDP */ +bool dc_set_psr_allow_active(struct dc *dc, bool enable) +{ + int i; + + for (i = 0; i < dc->current_state->stream_count ; i++) { + struct dc_link *link; + struct dc_stream_state *stream = dc->current_state->streams[i]; + + link = stream->link; + if (!link) + continue; + + if (link->psr_settings.psr_feature_enabled) { + if (enable && !link->psr_settings.psr_allow_active) + return dc_link_set_psr_allow_active(link, true, false); + else if (!enable && link->psr_settings.psr_allow_active) + return dc_link_set_psr_allow_active(link, false, false); + } + } + + return true; +} + #if defined(CONFIG_DRM_AMD_DC_DCN3_0) void dc_allow_idle_optimizations(struct dc *dc, bool allow) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 4bd6e03a7ef37d6bf8afa98e320750dde83dd6a6..437d1a7a16fe7aa2807a0d5c9439c3955c30bf7a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -763,6 +763,7 @@ static bool detect_dp(struct dc_link *link, sink_caps->signal = dp_passive_dongle_detection(link->ddc, sink_caps, audio_support); + link->dpcd_caps.dongle_type = sink_caps->dongle_type; } return true; @@ -3289,7 +3290,6 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) #if defined(CONFIG_DRM_AMD_DC_HDCP) update_psp_stream_config(pipe_ctx, true); #endif - dc->hwss.blank_stream(pipe_ctx); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 
9bc03f26efdafd29123aeb34418e135d14eca227..d1d95d3e248a7815a872210c83ed3d0fac87d7ce 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2431,6 +2431,12 @@ static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settin return false; } +static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting) +{ + *link_setting = link->verified_link_cap; + return true; +} + void decide_link_settings(struct dc_stream_state *stream, struct dc_link_settings *link_setting) { @@ -2456,11 +2462,9 @@ void decide_link_settings(struct dc_stream_state *stream, * TODO: add MST specific link training routine */ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - *link_setting = link->verified_link_cap; - return; - } - - if (link->connector_signal == SIGNAL_TYPE_EDP) { + if (decide_mst_link_settings(link, link_setting)) + return; + } else if (link->connector_signal == SIGNAL_TYPE_EDP) { if (decide_edp_link_settings(link, link_setting, req_bw)) return; } else if (decide_dp_link_settings(link, link_setting, req_bw)) @@ -4409,9 +4413,9 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link, link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, + if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, dpcd_backlight_get.raw, - sizeof(union dpcd_source_backlight_get))) + sizeof(union dpcd_source_backlight_get)) != DC_OK) return false; *backlight_millinits_avg = @@ -4450,9 +4454,9 @@ bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_milli link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, (uint8_t *) backlight_millinits, - sizeof(uint32_t))) + sizeof(uint32_t)) != DC_OK) return false; return true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 7b5f90ebb133bbd170827cd6d974005e2770b80c..c6b737dd8425508033717fefdd8a9242534a4dd3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -42,6 +42,9 @@ #include "virtual/virtual_stream_encoder.h" #include "dpcd_defs.h" +#if defined(CONFIG_DRM_AMD_DC_SI) +#include "dce60/dce60_resource.h" +#endif #include "dce80/dce80_resource.h" #include "dce100/dce100_resource.h" #include "dce110/dce110_resource.h" @@ -63,6 +66,18 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) enum dce_version dc_version = DCE_VERSION_UNKNOWN; switch (asic_id.chip_family) { +#if defined(CONFIG_DRM_AMD_DC_SI) + case FAMILY_SI: + if (ASIC_REV_IS_TAHITI_P(asic_id.hw_internal_rev) || + ASIC_REV_IS_PITCAIRN_PM(asic_id.hw_internal_rev) || + ASIC_REV_IS_CAPEVERDE_M(asic_id.hw_internal_rev)) + dc_version = DCE_VERSION_6_0; + else if (ASIC_REV_IS_OLAND_M(asic_id.hw_internal_rev)) + dc_version = DCE_VERSION_6_4; + else + dc_version = DCE_VERSION_6_1; + break; +#endif case FAMILY_CI: dc_version = DCE_VERSION_8_0; break; @@ -129,6 +144,20 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, struct resource_pool *res_pool = NULL; switch (dc_version) { +#if defined(CONFIG_DRM_AMD_DC_SI) + case DCE_VERSION_6_0: + res_pool = dce60_create_resource_pool( + init_data->num_virtual_links, dc); + break; + case DCE_VERSION_6_1: + res_pool = dce61_create_resource_pool( + 
init_data->num_virtual_links, dc); + break; + case DCE_VERSION_6_4: + res_pool = dce64_create_resource_pool( + init_data->num_virtual_links, dc); + break; +#endif case DCE_VERSION_8_0: res_pool = dce80_create_resource_pool( init_data->num_virtual_links, dc); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 0257a900fe2bb779cd1d351e66e8dffa0079fe6d..f42a17d765e375f0ceb3609fe695e1dfdaa84a03 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -708,3 +708,4 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) "\tlink: %d\n", stream->link->link_index); } + diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index f50ef4255020a9a91b14a8ac0a9ff097c98b7997..1d9c8e09c08b2533c8118d6a202e1039b3d08676 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -42,7 +42,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.95" +#define DC_VER "3.2.99" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -1265,6 +1265,9 @@ void dc_unlock_memory_clock_frequency(struct dc *dc); void dc_lock_memory_clock_frequency(struct dc *dc); #endif + +bool dc_set_psr_allow_active(struct dc *dc, bool enable); + /******************************************************************************* * DSC Interfaces ******************************************************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index d06d07042a12014a453fc866ca40eb4c9e6c513b..0811f941f4304e8796d381159a92e96f11066e0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -136,6 +136,10 @@ struct dc_vbios_funcs { enum bp_result (*get_atom_dc_golden_table)( struct dc_bios *dcb); + + enum bp_result (*enable_lvtma_control)( + struct dc_bios *bios, + uint8_t uc_pwr_on); }; struct bios_registers { diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 3800340a5b4f62f3676f05239512315463bbdc19..768ab38d41cf9948f9b3f967cb7a70d3186e8332 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -51,6 +51,7 @@ struct dc_dsc_policy { int min_slice_height; // Must not be less than 8 uint32_t max_target_bpp; uint32_t min_target_bpp; + bool enable_dsc_when_not_needed; }; bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, @@ -80,4 +81,6 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit); +void dc_dsc_policy_set_enable_dsc_when_not_needed(bool enable); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 633442bc7ef268bd094e75ae9a8583700b1d86aa..c246af7c584b0ff28833cfcfc0af81dbc09417fa 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -233,7 +233,7 @@ struct dc_stream_state { union stream_update_flags update_flags; }; -#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF +#define ABM_LEVEL_IMMEDIATE_DISABLE 255 struct dc_stream_update { struct dc_stream_state *stream; @@ -397,6 +397,8 @@ bool dc_enable_stereo( struct dc_stream_state *streams[], uint8_t stream_count); +/* Triggers multi-stream synchronization. 
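+ * Wraps enable_timing_multisync() and program_timing_sync(); it is a
+ * no-op unless the context holds more than one stream and timing sync
+ * is not disabled (see the dc.c hunk above).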
*/ +void dc_trigger_sync(struct dc *dc, struct dc_state *context); enum surface_update_type dc_check_update_surfaces_for_stream( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 946ba929c6f64144312768cc68675185df8a122d..aa8e0955db482a8e0ab44c06dfe066455c975c23 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -233,6 +233,7 @@ struct dc_panel_patch { unsigned int skip_scdc_overwrite; unsigned int delay_ignore_msa; unsigned int disable_fec; + unsigned int extra_t3_ms; }; struct dc_edid_caps { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h index a44effcda49f6985b1114a66903bcf564e86f608..e84d216058543348996b1dadeda28565f74db120 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h @@ -46,6 +46,8 @@ SR(BL1_PWM_USER_LEVEL), \ SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \ SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \ + SR(DC_ABM1_ACE_OFFSET_SLOPE_0), \ + SR(DC_ABM1_ACE_THRES_12), \ SR(BIOS_SCRATCH_2) #define ABM_DCN10_REG_LIST(id)\ @@ -60,6 +62,8 @@ SRI(BL1_PWM_USER_LEVEL, ABM, id), \ SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \ + SRI(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \ + SRI(DC_ABM1_ACE_THRES_12, ABM, id), \ NBIO_SR(BIOS_SCRATCH_2) #define ABM_DCN20_REG_LIST() \ @@ -74,10 +78,12 @@ SR(BL1_PWM_USER_LEVEL), \ SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \ SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \ + SR(DC_ABM1_ACE_OFFSET_SLOPE_0), \ + SR(DC_ABM1_ACE_THRES_12), \ NBIO_SR(BIOS_SCRATCH_2) #if defined(CONFIG_DRM_AMD_DC_DCN3_0) -#define ABM_DCN301_REG_LIST(id)\ +#define ABM_DCN30_REG_LIST(id)\ ABM_COMMON_REG_LIST_DCE_BASE(), \ SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \ SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \ @@ -89,6 +95,8 @@ SRI(BL1_PWM_USER_LEVEL, ABM, id), \ SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \ + SRI(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \ + SRI(DC_ABM1_ACE_THRES_12, ABM, id), \ NBIO_SR(BIOS_SCRATCH_2) #endif @@ -208,6 +216,8 @@ struct dce_abm_registers { uint32_t BL1_PWM_USER_LEVEL; uint32_t DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES; uint32_t DC_ABM1_HGLS_REG_READ_PROGRESS; + uint32_t DC_ABM1_ACE_OFFSET_SLOPE_0; + uint32_t DC_ABM1_ACE_THRES_12; uint32_t MASTER_COMM_CNTL_REG; uint32_t MASTER_COMM_CMD_REG; uint32_t MASTER_COMM_DATA_REG1; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index 4080465797127e7130b65cd91a077f8d9aa67c51..2a2a0fdb925393d2aa97788d1eb843fceccb1f39 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -867,6 +867,98 @@ void dce_aud_wall_dto_setup( } } +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_aud_wall_dto_setup( + struct audio *audio, + enum signal_type signal, + const struct audio_crtc_info *crtc_info, + const struct audio_pll_info *pll_info) +{ + struct dce_audio *aud = DCE_AUD(audio); + + struct azalia_clock_info clock_info = { 0 }; + + if (dc_is_hdmi_signal(signal)) { + uint32_t src_sel; + + /*DTO0 Programming goal: + -generate 24MHz, 128*Fs from 24MHz + -use DTO0 when an active HDMI port is connected + (optionally a DP is connected) */ + + /* calculate DTO settings */ + get_azalia_clock_info_hdmi( + crtc_info->requested_pixel_clock_100Hz, + crtc_info->calculated_pixel_clock_100Hz, + &clock_info); + + 
DC_LOG_HW_AUDIO("\n%s:Input::requested_pixel_clock_100Hz = %d"\ + "calculated_pixel_clock_100Hz =%d\n"\ + "audio_dto_module = %d audio_dto_phase =%d \n\n", __func__,\ + crtc_info->requested_pixel_clock_100Hz,\ + crtc_info->calculated_pixel_clock_100Hz,\ + clock_info.audio_dto_module,\ + clock_info.audio_dto_phase); + + /* On TN/SI, Program DTO source select and DTO select before + programming DTO modulo and DTO phase. These bits must be + programmed first, otherwise there will be no HDMI audio at boot + up. This is a HW sequence change (different from old ASICs). + Caution when changing this programming sequence. + + HDMI enabled, using DTO0 + program master CRTC for DTO0 */ + src_sel = pll_info->dto_source - DTO_SOURCE_ID0; + REG_UPDATE_2(DCCG_AUDIO_DTO_SOURCE, + DCCG_AUDIO_DTO0_SOURCE_SEL, src_sel, + DCCG_AUDIO_DTO_SEL, 0); + + /* module */ + REG_UPDATE(DCCG_AUDIO_DTO0_MODULE, + DCCG_AUDIO_DTO0_MODULE, clock_info.audio_dto_module); + + /* phase */ + REG_UPDATE(DCCG_AUDIO_DTO0_PHASE, + DCCG_AUDIO_DTO0_PHASE, clock_info.audio_dto_phase); + } else { + /*DTO1 Programming goal: + -generate 24MHz, 128*Fs from 24MHz (DCE6 does not support 512*Fs) + -default is to used DTO1, and switch to DTO0 when an audio + master HDMI port is connected + -use as default for DP + + calculate DTO settings */ + get_azalia_clock_info_dp( + crtc_info->requested_pixel_clock_100Hz, + pll_info, + &clock_info); + + /* Program DTO select before programming DTO modulo and DTO + phase. default to use DTO1 */ + + REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, + DCCG_AUDIO_DTO_SEL, 1); + + /* DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1) + * Cannot select 512fs for DP + * + * DCE6 has no DCCG_AUDIO_DTO2_USE_512FBR_DTO mask + */ + + /* module */ + REG_UPDATE(DCCG_AUDIO_DTO1_MODULE, + DCCG_AUDIO_DTO1_MODULE, clock_info.audio_dto_module); + + /* phase */ + REG_UPDATE(DCCG_AUDIO_DTO1_PHASE, + DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase); + + /* DCE6 has no DCCG_AUDIO_DTO2_USE_512FBR_DTO mask in DCCG_AUDIO_DTO_SOURCE reg */ + + } +} +#endif + static bool dce_aud_endpoint_valid(struct audio *audio) { uint32_t value; @@ -926,6 +1018,19 @@ static const struct audio_funcs funcs = { .az_configure = dce_aud_az_configure, .destroy = dce_aud_destroy, }; + +#if defined(CONFIG_DRM_AMD_DC_SI) +static const struct audio_funcs dce60_funcs = { + .endpoint_valid = dce_aud_endpoint_valid, + .hw_init = dce_aud_hw_init, + .wall_dto_setup = dce60_aud_wall_dto_setup, + .az_enable = dce_aud_az_enable, + .az_disable = dce_aud_az_disable, + .az_configure = dce_aud_az_configure, + .destroy = dce_aud_destroy, +}; +#endif + void dce_aud_destroy(struct audio **audio) { struct dce_audio *aud = DCE_AUD(*audio); @@ -959,3 +1064,29 @@ struct audio *dce_audio_create( return &audio->base; } +#if defined(CONFIG_DRM_AMD_DC_SI) +struct audio *dce60_audio_create( + struct dc_context *ctx, + unsigned int inst, + const struct dce_audio_registers *reg, + const struct dce_audio_shift *shifts, + const struct dce_audio_mask *masks + ) +{ + struct dce_audio *audio = kzalloc(sizeof(*audio), GFP_KERNEL); + + if (audio == NULL) { + ASSERT_CRITICAL(audio); + return NULL; + } + + audio->base.ctx = ctx; + audio->base.inst = inst; + audio->base.funcs = &dce60_funcs; + + audio->regs = reg; + audio->shifts = shifts; + audio->masks = masks; + return &audio->base; +} +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h index 1392fab0860ba7d5fcd5b6ced57f8c6f031dfb08..5622d5e32d81896fa545b4cbe51695343f415988 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h @@ -64,6 +64,20 @@ SF(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ SF(AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define AUD_DCE60_MASK_SH_LIST(mask_sh)\ + SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\ + SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\ + SF(DCCG_AUDIO_DTO0_MODULE, DCCG_AUDIO_DTO0_MODULE, mask_sh),\ + SF(DCCG_AUDIO_DTO0_PHASE, DCCG_AUDIO_DTO0_PHASE, mask_sh),\ + SF(DCCG_AUDIO_DTO1_MODULE, DCCG_AUDIO_DTO1_MODULE, mask_sh),\ + SF(DCCG_AUDIO_DTO1_PHASE, DCCG_AUDIO_DTO1_PHASE, mask_sh),\ + SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, AUDIO_RATE_CAPABILITIES, mask_sh),\ + SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, CLKSTOP, mask_sh),\ + SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, EPSS, mask_sh), \ + SF(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh) +#endif struct dce_audio_registers { uint32_t AZALIA_F0_CODEC_ENDPOINT_INDEX; @@ -135,6 +149,15 @@ struct audio *dce_audio_create( const struct dce_audio_shift *shifts, const struct dce_audio_mask *masks); +#if defined(CONFIG_DRM_AMD_DC_SI) +struct audio *dce60_audio_create( + struct dc_context *ctx, + unsigned int inst, + const struct dce_audio_registers *reg, + const struct dce_audio_shift *shifts, + const struct dce_audio_mask *masks); +#endif + void dce_aud_destroy(struct audio **audio); void dce_aud_hw_init(struct audio *audio); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h index 5e044c2d3d6d31f001f42648025b689fdc4270cc..93e7f34d4775e164c02db133233790b9cba2487d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h @@ -46,6 +46,24 @@ SR(SMU_INTERRUPT_CONTROL), \ SR(DC_DMCU_SCRATCH) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define DMCU_DCE60_REG_LIST() \ + SR(DMCU_CTRL), \ + SR(DMCU_STATUS), \ + SR(DMCU_RAM_ACCESS_CTRL), \ + SR(DMCU_IRAM_WR_CTRL), \ + SR(DMCU_IRAM_WR_DATA), \ + SR(MASTER_COMM_DATA_REG1), \ + SR(MASTER_COMM_DATA_REG2), \ + SR(MASTER_COMM_DATA_REG3), \ + SR(MASTER_COMM_CMD_REG), \ + SR(MASTER_COMM_CNTL_REG), \ + SR(DMCU_IRAM_RD_CTRL), \ + SR(DMCU_IRAM_RD_DATA), \ + SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \ + SR(DC_DMCU_SCRATCH) +#endif + #define DMCU_DCE80_REG_LIST() \ SR(DMCU_CTRL), \ SR(DMCU_STATUS), \ @@ -104,6 +122,25 @@ STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \ DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define DMCU_MASK_SH_LIST_DCE60(mask_sh) \ + DMCU_SF(DMCU_CTRL, \ + DMCU_ENABLE, mask_sh), \ + DMCU_SF(DMCU_STATUS, \ + UC_IN_STOP_MODE, mask_sh), \ + DMCU_SF(DMCU_STATUS, \ + UC_IN_RESET, mask_sh), \ + DMCU_SF(DMCU_RAM_ACCESS_CTRL, \ + IRAM_HOST_ACCESS_EN, mask_sh), \ + DMCU_SF(DMCU_RAM_ACCESS_CTRL, \ + IRAM_WR_ADDR_AUTO_INC, mask_sh), \ + DMCU_SF(DMCU_RAM_ACCESS_CTRL, \ + IRAM_RD_ADDR_AUTO_INC, mask_sh), \ + DMCU_SF(MASTER_COMM_CMD_REG, \ + MASTER_COMM_CMD_REG_BYTE0, mask_sh), \ + DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh) +#endif + #define DMCU_MASK_SH_LIST_DCE80(mask_sh) \ DMCU_SF(DMCU_CTRL, \ DMCU_ENABLE, mask_sh), \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c index 
e1c5839a80dcda582e01b38684c735d9a14081ee..4202fadb2c0e9ac84728763b6b5cbdd786c484cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c @@ -85,6 +85,15 @@ void dce_pipe_control_lock(struct dc *dc, } } +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_pipe_control_lock(struct dc *dc, + struct pipe_ctx *pipe, + bool lock) +{ + /* DCE6 has no BLND_V_UPDATE_LOCK register */ +} +#endif + void dce_set_blender_mode(struct dce_hwseq *hws, unsigned int blnd_inst, enum blnd_mode mode) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index 66b88d6ba398724e67832593b6ab0e48dffb89c1..70bbc1311327f31e9d7e9592a36b068d3c271a2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -110,6 +110,12 @@ SR(BLNDV_CONTROL),\ HWSEQ_PIXEL_RATE_REG_LIST(CRTC) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define HWSEQ_DCE6_REG_LIST() \ + HWSEQ_DCEF_REG_LIST_DCE8(), \ + HWSEQ_PIXEL_RATE_REG_LIST(CRTC) +#endif + #define HWSEQ_DCE8_REG_LIST() \ HWSEQ_DCEF_REG_LIST_DCE8(), \ HWSEQ_BLND_REG_LIST(), \ @@ -488,6 +494,12 @@ struct dce_hwseq_registers { HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\ HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define HWSEQ_DCE6_MASK_SH_LIST(mask_sh)\ + .DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \ + HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_) +#endif + #define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\ .DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\ @@ -836,6 +848,12 @@ void dce_pipe_control_lock(struct dc *dc, void dce_set_blender_mode(struct dce_hwseq *hws, unsigned int blnd_inst, enum blnd_mode mode); +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_pipe_control_lock(struct dc *dc, + struct pipe_ctx *pipe, + bool lock); +#endif + void dce_clock_gating_power_up(struct dce_hwseq *hws, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c index ce30dbf579d4fd0f4f2e06bbd177d6268c4c7f5b..80569a2734eb6c442fabfcd3924cfb7bc3083c7b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c @@ -231,6 +231,22 @@ static void dce_ipp_set_degamma( CURSOR2_DEGAMMA_MODE, degamma_type); } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_ipp_set_degamma( + struct input_pixel_processor *ipp, + enum ipp_degamma_mode mode) +{ + struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp); + uint32_t degamma_type = (mode == IPP_DEGAMMA_MODE_HW_sRGB) ? 
1 : 0; + + ASSERT(mode == IPP_DEGAMMA_MODE_BYPASS || mode == IPP_DEGAMMA_MODE_HW_sRGB); + /* DCE6 does not have CURSOR2_DEGAMMA_MODE bit in DEGAMMA_CONTROL reg */ + REG_SET_2(DEGAMMA_CONTROL, 0, + GRPH_DEGAMMA_MODE, degamma_type, + CURSOR_DEGAMMA_MODE, degamma_type); +} +#endif + static const struct ipp_funcs dce_ipp_funcs = { .ipp_cursor_set_attributes = dce_ipp_cursor_set_attributes, .ipp_cursor_set_position = dce_ipp_cursor_set_position, @@ -239,6 +255,17 @@ static const struct ipp_funcs dce_ipp_funcs = { .ipp_set_degamma = dce_ipp_set_degamma }; +#if defined(CONFIG_DRM_AMD_DC_SI) +static const struct ipp_funcs dce60_ipp_funcs = { + .ipp_cursor_set_attributes = dce_ipp_cursor_set_attributes, + .ipp_cursor_set_position = dce_ipp_cursor_set_position, + .ipp_program_prescale = dce_ipp_program_prescale, + .ipp_program_input_lut = dce_ipp_program_input_lut, + .ipp_set_degamma = dce60_ipp_set_degamma +}; +#endif + + /*****************************************/ /* Constructor, Destructor */ /*****************************************/ @@ -260,6 +287,25 @@ void dce_ipp_construct( ipp_dce->ipp_mask = ipp_mask; } +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_ipp_construct( + struct dce_ipp *ipp_dce, + struct dc_context *ctx, + int inst, + const struct dce_ipp_registers *regs, + const struct dce_ipp_shift *ipp_shift, + const struct dce_ipp_mask *ipp_mask) +{ + ipp_dce->base.ctx = ctx; + ipp_dce->base.inst = inst; + ipp_dce->base.funcs = &dce60_ipp_funcs; + + ipp_dce->regs = regs; + ipp_dce->ipp_shift = ipp_shift; + ipp_dce->ipp_mask = ipp_mask; +} +#endif + void dce_ipp_destroy(struct input_pixel_processor **ipp) { kfree(TO_DCE_IPP(*ipp)); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h index ca04e97d44c3ca2cfeeacb546ad0f069c1a59b33..0028d4bdd81bbf6bb50fea539bb5272966db3838 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h @@ -147,6 +147,46 @@ IPP_SF(DCP0_DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, mask_sh), \ IPP_SF(DCP0_DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, mask_sh) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define IPP_DCE60_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \ + IPP_SF(CUR_UPDATE, CURSOR_UPDATE_LOCK, mask_sh), \ + IPP_SF(CUR_CONTROL, CURSOR_EN, mask_sh), \ + IPP_SF(CUR_CONTROL, CURSOR_MODE, mask_sh), \ + IPP_SF(CUR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \ + IPP_SF(CUR_CONTROL, CUR_INV_TRANS_CLAMP, mask_sh), \ + IPP_SF(CUR_POSITION, CURSOR_X_POSITION, mask_sh), \ + IPP_SF(CUR_POSITION, CURSOR_Y_POSITION, mask_sh), \ + IPP_SF(CUR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \ + IPP_SF(CUR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \ + IPP_SF(CUR_COLOR1, CUR_COLOR1_BLUE, mask_sh), \ + IPP_SF(CUR_COLOR1, CUR_COLOR1_GREEN, mask_sh), \ + IPP_SF(CUR_COLOR1, CUR_COLOR1_RED, mask_sh), \ + IPP_SF(CUR_COLOR2, CUR_COLOR2_BLUE, mask_sh), \ + IPP_SF(CUR_COLOR2, CUR_COLOR2_GREEN, mask_sh), \ + IPP_SF(CUR_COLOR2, CUR_COLOR2_RED, mask_sh), \ + IPP_SF(CUR_SIZE, CURSOR_WIDTH, mask_sh), \ + IPP_SF(CUR_SIZE, CURSOR_HEIGHT, mask_sh), \ + IPP_SF(CUR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \ + IPP_SF(CUR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \ + IPP_SF(PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, mask_sh), \ + IPP_SF(PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_SCALE_R, mask_sh), \ + IPP_SF(PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_BIAS_R, mask_sh), \ + IPP_SF(PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_SCALE_G, mask_sh), \ + IPP_SF(PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_BIAS_G, mask_sh), \ + 
IPP_SF(PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_SCALE_B, mask_sh), \ + IPP_SF(PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_BIAS_B, mask_sh), \ + IPP_SF(INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, mask_sh), \ + IPP_SF(DC_LUT_WRITE_EN_MASK, DC_LUT_WRITE_EN_MASK, mask_sh), \ + IPP_SF(DC_LUT_RW_MODE, DC_LUT_RW_MODE, mask_sh), \ + IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_R_FORMAT, mask_sh), \ + IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_G_FORMAT, mask_sh), \ + IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_B_FORMAT, mask_sh), \ + IPP_SF(DC_LUT_RW_INDEX, DC_LUT_RW_INDEX, mask_sh), \ + IPP_SF(DC_LUT_SEQ_COLOR, DC_LUT_SEQ_COLOR, mask_sh), \ + IPP_SF(DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, mask_sh), \ + IPP_SF(DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, mask_sh) +#endif + #define IPP_REG_FIELD_LIST(type) \ type CURSOR_UPDATE_LOCK; \ type CURSOR_EN; \ @@ -233,6 +273,15 @@ void dce_ipp_construct(struct dce_ipp *ipp_dce, const struct dce_ipp_shift *ipp_shift, const struct dce_ipp_mask *ipp_mask); +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_ipp_construct(struct dce_ipp *ipp_dce, + struct dc_context *ctx, + int inst, + const struct dce_ipp_registers *regs, + const struct dce_ipp_shift *ipp_shift, + const struct dce_ipp_mask *ipp_mask); +#endif + void dce_ipp_destroy(struct input_pixel_processor **ipp); #endif /* _DCE_IPP_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 8d8c84c81b34e2105c0a320d9122ed5094beddc5..b409f6b2bfd832bea2f68da1e1404e0441ba4b5e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -425,6 +425,59 @@ static void set_dp_phy_pattern_hbr2_compliance_cp2520_2( enable_phy_bypass_mode(enc110, false); } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2( + struct dce110_link_encoder *enc110, + unsigned int cp2520_pattern) +{ + + /* previously there is a register DP_HBR2_EYE_PATTERN + * that is enabled to get the pattern. + * But it does not work with the latest spec change, + * so we are programming the following registers manually. + * + * The following settings have been confirmed + * by Nick Chorney and Sandra Liu */ + + /* Disable PHY Bypass mode to setup the test pattern */ + + enable_phy_bypass_mode(enc110, false); + + /* Setup DIG encoder in DP SST mode */ + enc110->base.funcs->setup(&enc110->base, SIGNAL_TYPE_DISPLAY_PORT); + + /* ensure normal panel mode. 
*/ + setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT); + + /* no vbid after BS (SR) + * DP_LINK_FRAMING_CNTL changed history Sandra Liu + * 11000260 / 11000104 / 110000FC */ + REG_UPDATE_3(DP_LINK_FRAMING_CNTL, + DP_IDLE_BS_INTERVAL, 0xFC, + DP_VBID_DISABLE, 1, + DP_VID_ENHANCED_FRAME_MODE, 1); + + /* DCE6 has no DP_DPHY_SCRAM_CNTL register, skip swap BS with SR */ + + /* select cp2520 patterns */ + if (REG(DP_DPHY_HBR2_PATTERN_CONTROL)) + REG_UPDATE(DP_DPHY_HBR2_PATTERN_CONTROL, + DP_DPHY_HBR2_PATTERN_CONTROL, cp2520_pattern); + else + /* pre-DCE11 can only generate CP2520 pattern 2 */ + ASSERT(cp2520_pattern == 2); + + /* set link training complete */ + set_link_training_complete(enc110, true); + + /* disable video stream */ + REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); + + /* Disable PHY Bypass mode to setup the test pattern */ + enable_phy_bypass_mode(enc110, false); +} +#endif + static void set_dp_phy_pattern_passthrough_mode( struct dce110_link_encoder *enc110, enum dp_panel_mode panel_mode) @@ -452,6 +505,35 @@ static void set_dp_phy_pattern_passthrough_mode( disable_prbs_mode(enc110); } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_set_dp_phy_pattern_passthrough_mode( + struct dce110_link_encoder *enc110, + enum dp_panel_mode panel_mode) +{ + /* program correct panel mode */ + setup_panel_mode(enc110, panel_mode); + + /* restore LINK_FRAMING_CNTL + * in case we were doing HBR2 compliance pattern before + */ + REG_UPDATE_3(DP_LINK_FRAMING_CNTL, + DP_IDLE_BS_INTERVAL, 0x2000, + DP_VBID_DISABLE, 0, + DP_VID_ENHANCED_FRAME_MODE, 1); + + /* DCE6 has no DP_DPHY_SCRAM_CNTL register, skip DPHY_SCRAMBLER_BS_COUNT restore */ + + /* set link training complete */ + set_link_training_complete(enc110, true); + + /* Disable PHY Bypass mode to setup the test pattern */ + enable_phy_bypass_mode(enc110, false); + + /* Disable PRBS mode */ + disable_prbs_mode(enc110); +} +#endif + /* return value is bit-vector */ static uint8_t get_frontend_source( enum engine_id engine) @@ -490,6 +572,20 @@ static void configure_encoder( REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, 1); } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_configure_encoder( + struct dce110_link_encoder *enc110, + const struct dc_link_settings *link_settings) +{ + /* set number of lanes */ + + REG_SET(DP_CONFIG, 0, + DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE); + + /* DCE6 has no DP_DPHY_SCRAM_CNTL register, skip setup scrambler */ +} +#endif + static void aux_initialize( struct dce110_link_encoder *enc110) { @@ -1059,6 +1155,87 @@ void dce110_link_encoder_enable_dp_mst_output( BREAK_TO_DEBUGGER(); } } + +#if defined(CONFIG_DRM_AMD_DC_SI) +/* enables DP PHY output */ +void dce60_link_encoder_enable_dp_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source) +{ + struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + /* Enable the PHY */ + + /* number_of_lanes is used for pixel clock adjust, + * but it's not passed to asic_control. + * We need to set number of lanes manually. 
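+ * (On DCE6 this is handled by dce60_configure_encoder(), called right below,
+ * which programs only DP_CONFIG.DP_UDI_LANES; there is no DP_DPHY_SCRAM_CNTL
+ * register to set up, as the helper above notes.)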
+ */ + dce60_configure_encoder(enc110, link_settings); + cntl.connector_obj_id = enc110->base.connector; + cntl.action = TRANSMITTER_CONTROL_ENABLE; + cntl.engine_id = enc->preferred_engine; + cntl.transmitter = enc110->base.transmitter; + cntl.pll_id = clock_source; + cntl.signal = SIGNAL_TYPE_DISPLAY_PORT; + cntl.lanes_number = link_settings->lane_count; + cntl.hpd_sel = enc110->base.hpd_source; + cntl.pixel_clock = link_settings->link_rate + * LINK_RATE_REF_FREQ_IN_KHZ; + /* TODO: check if undefined works */ + cntl.color_depth = COLOR_DEPTH_UNDEFINED; + + result = link_transmitter_control(enc110, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + } +} + +/* enables DP PHY output in MST mode */ +void dce60_link_encoder_enable_dp_mst_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source) +{ + struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + /* Enable the PHY */ + + /* number_of_lanes is used for pixel clock adjust, + * but it's not passed to asic_control. + * We need to set number of lanes manually. + */ + dce60_configure_encoder(enc110, link_settings); + + cntl.action = TRANSMITTER_CONTROL_ENABLE; + cntl.engine_id = ENGINE_ID_UNKNOWN; + cntl.transmitter = enc110->base.transmitter; + cntl.pll_id = clock_source; + cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST; + cntl.lanes_number = link_settings->lane_count; + cntl.hpd_sel = enc110->base.hpd_source; + cntl.pixel_clock = link_settings->link_rate + * LINK_RATE_REF_FREQ_IN_KHZ; + /* TODO: check if undefined works */ + cntl.color_depth = COLOR_DEPTH_UNDEFINED; + + result = link_transmitter_control(enc110, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + } +} +#endif + /* * @brief * Disable transmitter and its encoder @@ -1208,6 +1385,63 @@ void dce110_link_encoder_dp_set_phy_pattern( } } +#if defined(CONFIG_DRM_AMD_DC_SI) +/* set DP PHY test and training patterns */ +void dce60_link_encoder_dp_set_phy_pattern( + struct link_encoder *enc, + const struct encoder_set_dp_phy_pattern_param *param) +{ + struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); + + switch (param->dp_phy_pattern) { + case DP_TEST_PATTERN_TRAINING_PATTERN1: + dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0); + break; + case DP_TEST_PATTERN_TRAINING_PATTERN2: + dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1); + break; + case DP_TEST_PATTERN_TRAINING_PATTERN3: + dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2); + break; + case DP_TEST_PATTERN_TRAINING_PATTERN4: + dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 3); + break; + case DP_TEST_PATTERN_D102: + set_dp_phy_pattern_d102(enc110); + break; + case DP_TEST_PATTERN_SYMBOL_ERROR: + set_dp_phy_pattern_symbol_error(enc110); + break; + case DP_TEST_PATTERN_PRBS7: + set_dp_phy_pattern_prbs7(enc110); + break; + case DP_TEST_PATTERN_80BIT_CUSTOM: + set_dp_phy_pattern_80bit_custom( + enc110, param->custom_pattern); + break; + case DP_TEST_PATTERN_CP2520_1: + dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 1); + break; + case DP_TEST_PATTERN_CP2520_2: + dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 2); + break; + case DP_TEST_PATTERN_CP2520_3: + dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 3); + 
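+ /* The dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2() helper above
+ * serves CP2520 patterns 1-3; where DP_DPHY_HBR2_PATTERN_CONTROL is absent
+ * only pattern 2 can be generated, which the ASSERT in the helper enforces. */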
break; + case DP_TEST_PATTERN_VIDEO_MODE: { + dce60_set_dp_phy_pattern_passthrough_mode( + enc110, param->dp_panel_mode); + break; + } + + default: + /* invalid phy pattern */ + ASSERT_CRITICAL(false); + break; + } +} +#endif + static void fill_stream_allocation_row_info( const struct link_mst_stream_allocation *stream_allocation, uint32_t *src, @@ -1407,3 +1641,138 @@ void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc, *link_settings = max_link_cap; } + +#if defined(CONFIG_DRM_AMD_DC_SI) +static const struct link_encoder_funcs dce60_lnk_enc_funcs = { + .validate_output_with_stream = + dce110_link_encoder_validate_output_with_stream, + .hw_init = dce110_link_encoder_hw_init, + .setup = dce110_link_encoder_setup, + .enable_tmds_output = dce110_link_encoder_enable_tmds_output, + .enable_dp_output = dce60_link_encoder_enable_dp_output, + .enable_dp_mst_output = dce60_link_encoder_enable_dp_mst_output, + .enable_lvds_output = dce110_link_encoder_enable_lvds_output, + .disable_output = dce110_link_encoder_disable_output, + .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings, + .dp_set_phy_pattern = dce60_link_encoder_dp_set_phy_pattern, + .update_mst_stream_allocation_table = + dce110_link_encoder_update_mst_stream_allocation_table, + .psr_program_dp_dphy_fast_training = + dce110_psr_program_dp_dphy_fast_training, + .psr_program_secondary_packet = dce110_psr_program_secondary_packet, + .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe, + .enable_hpd = dce110_link_encoder_enable_hpd, + .disable_hpd = dce110_link_encoder_disable_hpd, + .is_dig_enabled = dce110_is_dig_enabled, + .destroy = dce110_link_encoder_destroy, + .get_max_link_cap = dce110_link_encoder_get_max_link_cap +}; + +void dce60_link_encoder_construct( + struct dce110_link_encoder *enc110, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dce110_link_enc_registers *link_regs, + const struct dce110_link_enc_aux_registers *aux_regs, + const struct dce110_link_enc_hpd_registers *hpd_regs) +{ + struct bp_encoder_cap_info bp_cap_info = {0}; + const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; + enum bp_result result = BP_RESULT_OK; + + enc110->base.funcs = &dce60_lnk_enc_funcs; + enc110->base.ctx = init_data->ctx; + enc110->base.id = init_data->encoder; + + enc110->base.hpd_source = init_data->hpd_source; + enc110->base.connector = init_data->connector; + + enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; + + enc110->base.features = *enc_features; + + enc110->base.transmitter = init_data->transmitter; + + /* set the flag to indicate whether driver poll the I2C data pin + * while doing the DP sink detect + */ + +/* if (dal_adapter_service_is_feature_supported(as, + FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) + enc110->base.features.flags.bits. + DP_SINK_DETECT_POLL_DATA_PIN = true;*/ + + enc110->base.output_signals = + SIGNAL_TYPE_DVI_SINGLE_LINK | + SIGNAL_TYPE_DVI_DUAL_LINK | + SIGNAL_TYPE_LVDS | + SIGNAL_TYPE_DISPLAY_PORT | + SIGNAL_TYPE_DISPLAY_PORT_MST | + SIGNAL_TYPE_EDP | + SIGNAL_TYPE_HDMI_TYPE_A; + + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. + * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. + * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer + * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS. + * Prefer DIG assignment is decided by board design. 
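+ * (In the switch statement below this reduces to a 1:1 mapping:
+ * TRANSMITTER_UNIPHY_A..G map to ENGINE_ID_DIGA..G.)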
+ * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design + * and VBIOS will filter out 7 UNIPHY for DCE 8.0. + * By this, adding DIGG should not hurt DCE 8.0. + * This will let DCE 8.1 share DCE 8.0 as much as possible + */ + + enc110->link_regs = link_regs; + enc110->aux_regs = aux_regs; + enc110->hpd_regs = hpd_regs; + + switch (enc110->base.transmitter) { + case TRANSMITTER_UNIPHY_A: + enc110->base.preferred_engine = ENGINE_ID_DIGA; + break; + case TRANSMITTER_UNIPHY_B: + enc110->base.preferred_engine = ENGINE_ID_DIGB; + break; + case TRANSMITTER_UNIPHY_C: + enc110->base.preferred_engine = ENGINE_ID_DIGC; + break; + case TRANSMITTER_UNIPHY_D: + enc110->base.preferred_engine = ENGINE_ID_DIGD; + break; + case TRANSMITTER_UNIPHY_E: + enc110->base.preferred_engine = ENGINE_ID_DIGE; + break; + case TRANSMITTER_UNIPHY_F: + enc110->base.preferred_engine = ENGINE_ID_DIGF; + break; + case TRANSMITTER_UNIPHY_G: + enc110->base.preferred_engine = ENGINE_ID_DIGG; + break; + default: + ASSERT_CRITICAL(false); + enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; + } + + /* default to one to mirror Windows behavior */ + enc110->base.features.flags.bits.HDMI_6GB_EN = 1; + + result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios, + enc110->base.id, &bp_cap_info); + + /* Override features with DCE-specific values */ + if (BP_RESULT_OK == result) { + enc110->base.features.flags.bits.IS_HBR2_CAPABLE = + bp_cap_info.DP_HBR2_EN; + enc110->base.features.flags.bits.IS_HBR3_CAPABLE = + bp_cap_info.DP_HBR3_EN; + enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; + } else { + DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", + __func__, + result); + } + if (enc110->base.ctx->dc->debug.hdmi20_disable) { + enc110->base.features.flags.bits.HDMI_6GB_EN = 0; + } +} +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index 66027d496778a6353e2a00496632945d297454c0..cb714a48b171c39079694895842b7d6bae6af47d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h @@ -76,6 +76,34 @@ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ SR(DCI_MEM_PWR_STATUS) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define LE_DCE60_REG_LIST(id)\ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ + SR(DMCU_RAM_ACCESS_CTRL), \ + SR(DMCU_IRAM_RD_CTRL), \ + SR(DMCU_IRAM_RD_DATA), \ + SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \ + SRI(DIG_BE_CNTL, DIG, id), \ + SRI(DIG_BE_EN_CNTL, DIG, id), \ + SRI(DP_CONFIG, DP, id), \ + SRI(DP_DPHY_CNTL, DP, id), \ + SRI(DP_DPHY_PRBS_CNTL, DP, id), \ + SRI(DP_DPHY_SYM0, DP, id), \ + SRI(DP_DPHY_SYM1, DP, id), \ + SRI(DP_DPHY_SYM2, DP, id), \ + SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \ + SRI(DP_LINK_CNTL, DP, id), \ + SRI(DP_LINK_FRAMING_CNTL, DP, id), \ + SRI(DP_MSE_SAT0, DP, id), \ + SRI(DP_MSE_SAT1, DP, id), \ + SRI(DP_MSE_SAT2, DP, id), \ + SRI(DP_MSE_SAT_UPDATE, DP, id), \ + SRI(DP_SEC_CNTL, DP, id), \ + SRI(DP_VID_STREAM_CNTL, DP, id), \ + SRI(DP_DPHY_FAST_TRAINING, DP, id), \ + SRI(DP_SEC_CNTL1, DP, id) +#endif + #define LE_DCE80_REG_LIST(id)\ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ LE_COMMON_REG_LIST_BASE(id) @@ -171,6 +199,16 @@ void dce110_link_encoder_construct( const struct dce110_link_enc_aux_registers *aux_regs, const struct dce110_link_enc_hpd_registers *hpd_regs); +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_link_encoder_construct( + struct dce110_link_encoder *enc110, + const struct encoder_init_data 
*init_data, + const struct encoder_feature_support *enc_features, + const struct dce110_link_enc_registers *link_regs, + const struct dce110_link_enc_aux_registers *aux_regs, + const struct dce110_link_enc_hpd_registers *hpd_regs); +#endif + bool dce110_link_encoder_validate_dvi_output( const struct dce110_link_encoder *enc110, enum signal_type connector_signal, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index 51481e922eb9ca2a143b8f582ea8453dd92780f5..79a6f261a0da99a5854d9cb6c3d570095f1bbdeb 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -174,6 +174,22 @@ static void program_urgency_watermark( URGENCY_HIGH_WATERMARK, urgency_high_wm); } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_program_urgency_watermark( + struct dce_mem_input *dce_mi, + uint32_t wm_select, + uint32_t urgency_low_wm, + uint32_t urgency_high_wm) +{ + REG_UPDATE(DPG_PIPE_ARBITRATION_CONTROL3, + URGENCY_WATERMARK_MASK, wm_select); + + REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0, + URGENCY_LOW_WATERMARK, urgency_low_wm, + URGENCY_HIGH_WATERMARK, urgency_high_wm); +} +#endif + static void dce120_program_urgency_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, @@ -193,6 +209,25 @@ static void dce120_program_urgency_watermark( } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_program_nbp_watermark( + struct dce_mem_input *dce_mi, + uint32_t wm_select, + uint32_t nbp_wm) +{ + REG_UPDATE(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, + NB_PSTATE_CHANGE_WATERMARK_MASK, wm_select); + + REG_UPDATE_3(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, + NB_PSTATE_CHANGE_ENABLE, 1, + NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, 1, + NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1); + + REG_UPDATE(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, + NB_PSTATE_CHANGE_WATERMARK, nbp_wm); +} +#endif + static void program_nbp_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, @@ -225,6 +260,20 @@ static void program_nbp_watermark( } } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_program_stutter_watermark( + struct dce_mem_input *dce_mi, + uint32_t wm_select, + uint32_t stutter_mark) +{ + REG_UPDATE(DPG_PIPE_STUTTER_CONTROL, + STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select); + + REG_UPDATE(DPG_PIPE_STUTTER_CONTROL, + STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark); +} +#endif + static void dce120_program_stutter_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, @@ -286,6 +335,34 @@ static void dce_mi_program_display_marks( program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark); /* set d */ } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_mi_program_display_marks( + struct mem_input *mi, + struct dce_watermarks nbp, + struct dce_watermarks stutter_exit, + struct dce_watermarks stutter_enter, + struct dce_watermarks urgent, + uint32_t total_dest_line_time_ns) +{ + struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); + uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 
0 : 1; + + dce60_program_urgency_watermark(dce_mi, 2, /* set a */ + urgent.a_mark, total_dest_line_time_ns); + dce60_program_urgency_watermark(dce_mi, 1, /* set d */ + urgent.d_mark, total_dest_line_time_ns); + + REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL, + STUTTER_ENABLE, stutter_en, + STUTTER_IGNORE_FBC, 1); + dce60_program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */ + dce60_program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */ + + dce60_program_stutter_watermark(dce_mi, 2, stutter_exit.a_mark); /* set a */ + dce60_program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark); /* set d */ +} +#endif + static void dce112_mi_program_display_marks(struct mem_input *mi, struct dce_watermarks nbp, struct dce_watermarks stutter_exit, @@ -369,7 +446,7 @@ static void program_tiling( */ } - if (dce_mi->masks->GRPH_ARRAY_MODE) { /* GFX8 */ + if (dce_mi->masks->GRPH_MICRO_TILE_MODE) { /* GFX8 */ REG_UPDATE_9(GRPH_CONTROL, GRPH_NUM_BANKS, info->gfx8.num_banks, GRPH_BANK_WIDTH, info->gfx8.bank_width, @@ -385,6 +462,23 @@ static void program_tiling( GRPH_Z, 0); */ } + + if (dce_mi->masks->GRPH_ARRAY_MODE) { /* GFX6 but reuses gfx8 struct */ + REG_UPDATE_8(GRPH_CONTROL, + GRPH_NUM_BANKS, info->gfx8.num_banks, + GRPH_BANK_WIDTH, info->gfx8.bank_width, + GRPH_BANK_HEIGHT, info->gfx8.bank_height, + GRPH_MACRO_TILE_ASPECT, info->gfx8.tile_aspect, + GRPH_TILE_SPLIT, info->gfx8.tile_split, + /* DCE6 has no GRPH_MICRO_TILE_MODE mask */ + GRPH_PIPE_CONFIG, info->gfx8.pipe_config, + GRPH_ARRAY_MODE, info->gfx8.array_mode, + GRPH_COLOR_EXPANSION_MODE, 1); + /* 01 - DCP_GRPH_COLOR_EXPANSION_MODE_ZEXP: zero expansion for YCbCr */ + /* + GRPH_Z, 0); + */ + } } @@ -429,6 +523,36 @@ static void program_size_and_rotation( GRPH_ROTATION_ANGLE, rotation_angles[rotation]); } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_program_size( + struct dce_mem_input *dce_mi, + enum dc_rotation_angle rotation, /* not used in DCE6 */ + const struct plane_size *plane_size) +{ + struct rect hw_rect = plane_size->surface_size; + /* DCE6 has no HW rotation, skip rotation_angles declaration */ + + /* DCE6 has no HW rotation, skip ROTATION_ANGLE_* processing */ + + REG_SET(GRPH_X_START, 0, + GRPH_X_START, hw_rect.x); + + REG_SET(GRPH_Y_START, 0, + GRPH_Y_START, hw_rect.y); + + REG_SET(GRPH_X_END, 0, + GRPH_X_END, hw_rect.width); + + REG_SET(GRPH_Y_END, 0, + GRPH_Y_END, hw_rect.height); + + REG_SET(GRPH_PITCH, 0, + GRPH_PITCH, plane_size->surface_pitch); + + /* DCE6 has no HW_ROTATION register, skip setting rotation_angles */ +} +#endif + static void program_grph_pixel_format( struct dce_mem_input *dce_mi, enum surface_pixel_format format) @@ -521,6 +645,28 @@ static void dce_mi_program_surface_config( program_grph_pixel_format(dce_mi, format); } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_mi_program_surface_config( + struct mem_input *mi, + enum surface_pixel_format format, + union dc_tiling_info *tiling_info, + struct plane_size *plane_size, + enum dc_rotation_angle rotation, /* not used in DCE6 */ + struct dc_plane_dcc_param *dcc, + bool horizontal_mirror) +{ + struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); + REG_UPDATE(GRPH_ENABLE, GRPH_ENABLE, 1); + + program_tiling(dce_mi, tiling_info); + dce60_program_size(dce_mi, rotation, plane_size); + + if (format >= SURFACE_PIXEL_FORMAT_GRPH_BEGIN && + format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + program_grph_pixel_format(dce_mi, format); +} +#endif + static uint32_t get_dmif_switch_time_us( uint32_t h_total, uint32_t v_total, @@ -741,6 +887,20 @@ static const struct 
mem_input_funcs dce_mi_funcs = { .mem_input_is_flip_pending = dce_mi_is_flip_pending }; +#if defined(CONFIG_DRM_AMD_DC_SI) +static const struct mem_input_funcs dce60_mi_funcs = { + .mem_input_program_display_marks = dce60_mi_program_display_marks, + .allocate_mem_input = dce_mi_allocate_dmif, + .free_mem_input = dce_mi_free_dmif, + .mem_input_program_surface_flip_and_addr = + dce_mi_program_surface_flip_and_addr, + .mem_input_program_pte_vm = dce_mi_program_pte_vm, + .mem_input_program_surface_config = + dce60_mi_program_surface_config, + .mem_input_is_flip_pending = dce_mi_is_flip_pending +}; +#endif + static const struct mem_input_funcs dce112_mi_funcs = { .mem_input_program_display_marks = dce112_mi_program_display_marks, .allocate_mem_input = dce_mi_allocate_dmif, @@ -783,6 +943,20 @@ void dce_mem_input_construct( dce_mi->masks = mi_mask; } +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_mem_input_construct( + struct dce_mem_input *dce_mi, + struct dc_context *ctx, + int inst, + const struct dce_mem_input_registers *regs, + const struct dce_mem_input_shift *mi_shift, + const struct dce_mem_input_mask *mi_mask) +{ + dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); + dce_mi->base.funcs = &dce60_mi_funcs; +} +#endif + void dce112_mem_input_construct( struct dce_mem_input *dce_mi, struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h index d15b0d7f47fcd6a8658e440cbb547618d8fe5d02..23db5c72f07ed72a0dc386f56066a3f480506700 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h @@ -58,6 +58,31 @@ SRI(DVMM_PTE_CONTROL, DCP, id),\ SRI(DVMM_PTE_ARB_CONTROL, DCP, id) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define MI_DCE6_REG_LIST(id)\ + SRI(GRPH_ENABLE, DCP, id),\ + SRI(GRPH_CONTROL, DCP, id),\ + SRI(GRPH_X_START, DCP, id),\ + SRI(GRPH_Y_START, DCP, id),\ + SRI(GRPH_X_END, DCP, id),\ + SRI(GRPH_Y_END, DCP, id),\ + SRI(GRPH_PITCH, DCP, id),\ + SRI(GRPH_SWAP_CNTL, DCP, id),\ + SRI(PRESCALE_GRPH_CONTROL, DCP, id),\ + SRI(GRPH_UPDATE, DCP, id),\ + SRI(GRPH_FLIP_CONTROL, DCP, id),\ + SRI(GRPH_PRIMARY_SURFACE_ADDRESS, DCP, id),\ + SRI(GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, DCP, id),\ + SRI(GRPH_SECONDARY_SURFACE_ADDRESS, DCP, id),\ + SRI(GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, DCP, id),\ + SRI(DPG_PIPE_ARBITRATION_CONTROL1, DMIF_PG, id),\ + SRI(DPG_PIPE_ARBITRATION_CONTROL3, DMIF_PG, id),\ + SRI(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, DMIF_PG, id),\ + SRI(DPG_PIPE_URGENCY_CONTROL, DMIF_PG, id),\ + SRI(DPG_PIPE_STUTTER_CONTROL, DMIF_PG, id),\ + SRI(DMIF_BUFFER_CONTROL, PIPE, id) +#endif + #define MI_DCE8_REG_LIST(id)\ MI_DCE_BASE_REG_LIST(id),\ SRI(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, DMIF_PG, id) @@ -104,6 +129,9 @@ struct dce_mem_input_registers { uint32_t GRPH_SECONDARY_SURFACE_ADDRESS_HIGH; /* DMIF_PG */ uint32_t DPG_PIPE_ARBITRATION_CONTROL1; +#if defined(CONFIG_DRM_AMD_DC_SI) + uint32_t DPG_PIPE_ARBITRATION_CONTROL3; +#endif uint32_t DPG_WATERMARK_MASK_CONTROL; uint32_t DPG_PIPE_URGENCY_CONTROL; uint32_t DPG_PIPE_URGENT_LEVEL_CONTROL; @@ -126,6 +154,18 @@ struct dce_mem_input_registers { #define SFB(blk_name, reg_name, field_name, post_fix)\ .field_name = blk_name ## reg_name ## __ ## field_name ## post_fix +#if defined(CONFIG_DRM_AMD_DC_SI) +#define MI_GFX6_TILE_MASK_SH_LIST(mask_sh, blk)\ + SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_BANK_WIDTH, mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_BANK_HEIGHT, 
mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_TILE_SPLIT, mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_PIPE_CONFIG, mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_ARRAY_MODE, mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE, mask_sh) +#endif + #define MI_GFX8_TILE_MASK_SH_LIST(mask_sh, blk)\ SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\ SFB(blk, GRPH_CONTROL, GRPH_BANK_WIDTH, mask_sh),\ @@ -162,6 +202,32 @@ struct dce_mem_input_registers { SFB(blk, GRPH_UPDATE, GRPH_UPDATE_LOCK, mask_sh),\ SFB(blk, GRPH_FLIP_CONTROL, GRPH_SURFACE_UPDATE_H_RETRACE_EN, mask_sh) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define MI_DCP_MASK_SH_LIST_DCE6(mask_sh, blk)\ + SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\ + SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\ + SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\ + SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\ + SFB(blk, GRPH_Y_END, GRPH_Y_END, mask_sh),\ + SFB(blk, GRPH_PITCH, GRPH_PITCH, mask_sh),\ + SFB(blk, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, mask_sh),\ + SFB(blk, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, mask_sh),\ + SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_SELECT, mask_sh),\ + SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_R_SIGN, mask_sh),\ + SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_G_SIGN, mask_sh),\ + SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_B_SIGN, mask_sh),\ + SFB(blk, GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\ + SFB(blk, GRPH_SECONDARY_SURFACE_ADDRESS, GRPH_SECONDARY_SURFACE_ADDRESS, mask_sh),\ + SFB(blk, GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\ + SFB(blk, GRPH_PRIMARY_SURFACE_ADDRESS, GRPH_PRIMARY_SURFACE_ADDRESS, mask_sh),\ + SFB(blk, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING, mask_sh),\ + SFB(blk, GRPH_UPDATE, GRPH_UPDATE_LOCK, mask_sh),\ + SFB(blk, GRPH_FLIP_CONTROL, GRPH_SURFACE_UPDATE_H_RETRACE_EN, mask_sh) +#endif + #define MI_DCP_DCE11_MASK_SH_LIST(mask_sh, blk)\ SFB(blk, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, mask_sh) @@ -172,6 +238,33 @@ struct dce_mem_input_registers { SFB(blk, DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK, mask_sh),\ SFB(blk, DVMM_PTE_ARB_CONTROL, DVMM_MAX_PTE_REQ_OUTSTANDING, mask_sh) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define MI_DMIF_PG_MASK_SH_LIST_DCE6(mask_sh, blk)\ + SFB(blk, DPG_PIPE_ARBITRATION_CONTROL1, PIXEL_DURATION, mask_sh),\ + SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, mask_sh),\ + SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, mask_sh),\ + SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE, mask_sh),\ + SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_IGNORE_FBC, mask_sh),\ + SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, mask_sh),\ + SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED, mask_sh) + +#define MI_DMIF_PG_MASK_SH_DCE6(mask_sh, blk)\ + SFB(blk, DPG_PIPE_ARBITRATION_CONTROL3, URGENCY_WATERMARK_MASK, mask_sh),\ + SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, mask_sh),\ + SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\ + SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\ + SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_ENABLE, mask_sh),\ + SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, 
mask_sh),\ + SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, mask_sh),\ + SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_WATERMARK, mask_sh) + +#define MI_DCE6_MASK_SH_LIST(mask_sh)\ + MI_DCP_MASK_SH_LIST_DCE6(mask_sh, ),\ + MI_DMIF_PG_MASK_SH_LIST_DCE6(mask_sh, ),\ + MI_DMIF_PG_MASK_SH_DCE6(mask_sh, ),\ + MI_GFX6_TILE_MASK_SH_LIST(mask_sh, ) +#endif + #define MI_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\ SFB(blk, DPG_PIPE_ARBITRATION_CONTROL1, PIXEL_DURATION, mask_sh),\ SFB(blk, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, mask_sh),\ @@ -345,6 +438,16 @@ void dce_mem_input_construct( const struct dce_mem_input_shift *mi_shift, const struct dce_mem_input_mask *mi_mask); +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_mem_input_construct( + struct dce_mem_input *dce_mi, + struct dc_context *ctx, + int inst, + const struct dce_mem_input_registers *regs, + const struct dce_mem_input_shift *mi_shift, + const struct dce_mem_input_mask *mi_mask); +#endif + void dce112_mem_input_construct( struct dce_mem_input *dce_mi, struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c index 51081d9ae3fb7c3a02af69d8450d2e160e401e61..e459ae65aaf7641368eb3465caa078470820f83a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c @@ -141,6 +141,47 @@ static void set_truncation( params->flags.TRUNCATE_MODE); } +#if defined(CONFIG_DRM_AMD_DC_SI) +/** + * dce60_set_truncation + * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp + * 2) enable truncation + * 3) HW removed 12-bit FMT support for DCE11 power saving reasons. + */ +static void dce60_set_truncation( + struct dce110_opp *opp110, + const struct bit_depth_reduction_params *params) +{ + /* DCE6 has no FMT_TRUNCATE_MODE bit in FMT_BIT_DEPTH_CONTROL reg */ + + /*Disable truncation*/ + REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL, + FMT_TRUNCATE_EN, 0, + FMT_TRUNCATE_DEPTH, 0); + + if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) { + /* 8bpc trunc on YCbCr422*/ + if (params->flags.TRUNCATE_DEPTH == 1) + REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL, + FMT_TRUNCATE_EN, 1, + FMT_TRUNCATE_DEPTH, 1); + else if (params->flags.TRUNCATE_DEPTH == 2) + /* 10bpc trunc on YCbCr422*/ + REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL, + FMT_TRUNCATE_EN, 1, + FMT_TRUNCATE_DEPTH, 2); + return; + } + /* other formats: to do */ + if (params->flags.TRUNCATE_ENABLED == 0) + return; + /*Set truncation depth and Enable truncation*/ + REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL, + FMT_TRUNCATE_EN, 1, + FMT_TRUNCATE_DEPTH, + params->flags.TRUNCATE_DEPTH); +} +#endif /** * set_spatial_dither @@ -373,6 +414,57 @@ void dce110_opp_set_clamping( } } +#if defined(CONFIG_DRM_AMD_DC_SI) +/** + * Set Clamping for DCE6 parts + * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping) + * 1 for 8 bpc + * 2 for 10 bpc + * 3 for 12 bpc + * 7 for programmable + * 2) Enable clamp if Limited range requested + */ +void dce60_opp_set_clamping( + struct dce110_opp *opp110, + const struct clamping_and_pixel_encoding_params *params) +{ + REG_SET_2(FMT_CLAMP_CNTL, 0, + FMT_CLAMP_DATA_EN, 0, + FMT_CLAMP_COLOR_FORMAT, 0); + + switch (params->clamping_level) { + case CLAMPING_FULL_RANGE: + break; + case CLAMPING_LIMITED_RANGE_8BPC: + REG_SET_2(FMT_CLAMP_CNTL, 0, + FMT_CLAMP_DATA_EN, 1, + FMT_CLAMP_COLOR_FORMAT, 1); + break; + case CLAMPING_LIMITED_RANGE_10BPC: + REG_SET_2(FMT_CLAMP_CNTL, 0, + FMT_CLAMP_DATA_EN, 1, + FMT_CLAMP_COLOR_FORMAT, 2); + break;
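+ /* For reference, a minimal standalone sketch of the FMT_CLAMP_COLOR_FORMAT
+ * mapping programmed by this switch (helper name and enum type are assumed
+ * here for illustration only; this sketch is not part of the patch):
+ *
+ *	static uint32_t dce60_clamp_color_format(enum dc_clamping_level level)
+ *	{
+ *		switch (level) {
+ *		case CLAMPING_LIMITED_RANGE_8BPC:         return 1;
+ *		case CLAMPING_LIMITED_RANGE_10BPC:        return 2;
+ *		case CLAMPING_LIMITED_RANGE_12BPC:        return 3;
+ *		case CLAMPING_LIMITED_RANGE_PROGRAMMABLE: return 7;
+ *		default:                                  return 0;
+ *		}
+ *	}
+ */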
+ case CLAMPING_LIMITED_RANGE_12BPC: + REG_SET_2(FMT_CLAMP_CNTL, 0, + FMT_CLAMP_DATA_EN, 1, + FMT_CLAMP_COLOR_FORMAT, 3); + break; + case CLAMPING_LIMITED_RANGE_PROGRAMMABLE: + /*Set clamp control*/ + REG_SET_2(FMT_CLAMP_CNTL, 0, + FMT_CLAMP_DATA_EN, 1, + FMT_CLAMP_COLOR_FORMAT, 7); + + /* DCE6 does not have the FMT_CLAMP_COMPONENT_{R,G,B} registers, so the per-component clamp values are not programmed here */ + + break; + default: + break; + } +} +#endif + /** * set_pixel_encoding * @@ -408,6 +500,39 @@ static void set_pixel_encoding( } +#if defined(CONFIG_DRM_AMD_DC_SI) +/** + * dce60_set_pixel_encoding + * DCE6 has no FMT_SUBSAMPLING_{MODE,ORDER} bits in FMT_CONTROL reg + * Set Pixel Encoding + * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly + * 1: YCbCr 4:2:2 + */ +static void dce60_set_pixel_encoding( + struct dce110_opp *opp110, + const struct clamping_and_pixel_encoding_params *params) +{ + if (opp110->opp_mask->FMT_CBCR_BIT_REDUCTION_BYPASS) + REG_UPDATE_2(FMT_CONTROL, + FMT_PIXEL_ENCODING, 0, + FMT_CBCR_BIT_REDUCTION_BYPASS, 0); + else + REG_UPDATE(FMT_CONTROL, + FMT_PIXEL_ENCODING, 0); + + if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) { + REG_UPDATE(FMT_CONTROL, + FMT_PIXEL_ENCODING, 1); + } + if (params->pixel_encoding == PIXEL_ENCODING_YCBCR420) { + REG_UPDATE_2(FMT_CONTROL, + FMT_PIXEL_ENCODING, 2, + FMT_CBCR_BIT_REDUCTION_BYPASS, 1); + } + +} +#endif + void dce110_opp_program_bit_depth_reduction( struct output_pixel_processor *opp, const struct bit_depth_reduction_params *params) @@ -419,6 +544,19 @@ void dce110_opp_program_bit_depth_reduction( set_temporal_dither(opp110, params); } +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_opp_program_bit_depth_reduction( + struct output_pixel_processor *opp, + const struct bit_depth_reduction_params *params) +{ + struct dce110_opp *opp110 = TO_DCE110_OPP(opp); + + dce60_set_truncation(opp110, params); + set_spatial_dither(opp110, params); + set_temporal_dither(opp110, params); +} +#endif + void dce110_opp_program_clamping_and_pixel_encoding( struct output_pixel_processor *opp, const struct clamping_and_pixel_encoding_params *params) @@ -429,6 +567,19 @@ void dce110_opp_program_clamping_and_pixel_encoding( set_pixel_encoding(opp110, params); } +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_opp_program_clamping_and_pixel_encoding( + struct output_pixel_processor *opp, + const struct clamping_and_pixel_encoding_params *params) +{ + struct dce110_opp *opp110 = TO_DCE110_OPP(opp); + + dce60_opp_set_clamping(opp110, params); + dce60_set_pixel_encoding(opp110, params); +} +#endif + + static void program_formatter_420_memory(struct output_pixel_processor *opp) { struct dce110_opp *opp110 = TO_DCE110_OPP(opp); @@ -526,7 +677,32 @@ void dce110_opp_program_fmt( return; } +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_opp_program_fmt( + struct output_pixel_processor *opp, + struct bit_depth_reduction_params *fmt_bit_depth, + struct clamping_and_pixel_encoding_params *clamping) +{ + /* dithering is affected by <CrtcSourceSelect>, hence should be + * programmed afterwards */ + + if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420) + program_formatter_420_memory(opp); + + dce60_opp_program_bit_depth_reduction( + opp, + fmt_bit_depth); + + dce60_opp_program_clamping_and_pixel_encoding( + opp, + clamping); + + if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420) + program_formatter_reset_dig_resync_fifo(opp); + return; +} +#endif @@ -541,6 +717,15 @@ static const struct opp_funcs funcs = { .opp_program_bit_depth_reduction = dce110_opp_program_bit_depth_reduction }; +#if defined(CONFIG_DRM_AMD_DC_SI) +static const struct
opp_funcs dce60_opp_funcs = { + .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion, + .opp_destroy = dce110_opp_destroy, + .opp_program_fmt = dce60_opp_program_fmt, + .opp_program_bit_depth_reduction = dce60_opp_program_bit_depth_reduction +}; +#endif + void dce110_opp_construct(struct dce110_opp *opp110, struct dc_context *ctx, uint32_t inst, @@ -559,6 +744,26 @@ void dce110_opp_construct(struct dce110_opp *opp110, opp110->opp_mask = opp_mask; } +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_opp_construct(struct dce110_opp *opp110, + struct dc_context *ctx, + uint32_t inst, + const struct dce_opp_registers *regs, + const struct dce_opp_shift *opp_shift, + const struct dce_opp_mask *opp_mask) +{ + opp110->base.funcs = &dce60_opp_funcs; + + opp110->base.ctx = ctx; + + opp110->base.inst = inst; + + opp110->regs = regs; + opp110->opp_shift = opp_shift; + opp110->opp_mask = opp_mask; +} +#endif + void dce110_opp_destroy(struct output_pixel_processor **opp) { if (*opp) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h index 2ab0147cbd9d4fc28442a5de1240bf9fa07369f8..4d484ef60f35727a41e56ea225ccdb337f617d28 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h @@ -81,6 +81,17 @@ enum dce110_opp_reg_type { OPP_COMMON_REG_LIST_BASE(id), \ SRI(CONTROL, FMT_MEMORY, id) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define OPP_DCE_60_REG_LIST(id) \ + SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \ + SRI(FMT_BIT_DEPTH_CONTROL, FMT, id), \ + SRI(FMT_CONTROL, FMT, id), \ + SRI(FMT_DITHER_RAND_R_SEED, FMT, id), \ + SRI(FMT_DITHER_RAND_G_SEED, FMT, id), \ + SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \ + SRI(FMT_CLAMP_CNTL, FMT, id) +#endif + #define OPP_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -192,6 +203,35 @@ enum dce110_opp_reg_type { OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_ORDER, mask_sh),\ OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define OPP_COMMON_MASK_SH_LIST_DCE_60(mask_sh)\ + OPP_SF(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh),\ + OPP_SF(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\ + OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\ + OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\ + OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_LEVEL, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_25FRC_SEL, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_50FRC_SEL, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_75FRC_SEL, mask_sh),\ + OPP_SF(FMT_CLAMP_CNTL, 
FMT_CLAMP_DATA_EN, mask_sh),\ + OPP_SF(FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh),\ + OPP_SF(FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh) +#endif + #define OPP_REG_FIELD_LIST(type) \ type FMT_DYNAMIC_EXP_EN; \ type FMT_DYNAMIC_EXP_MODE; \ @@ -279,6 +319,15 @@ void dce110_opp_construct(struct dce110_opp *opp110, const struct dce_opp_shift *opp_shift, const struct dce_opp_mask *opp_mask); +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_opp_construct(struct dce110_opp *opp110, + struct dc_context *ctx, + uint32_t inst, + const struct dce_opp_registers *regs, + const struct dce_opp_shift *opp_shift, + const struct dce_opp_mask *opp_mask); +#endif + void dce110_opp_destroy(struct output_pixel_processor **opp); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c index 43781e77be431e08dc86df06ffbee0e120fbff17..df7f826eebd86f4ede2cdc618f059240a9431e12 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c @@ -46,13 +46,14 @@ #define FN(reg_name, field_name) \ dce_panel_cntl->shift->field_name, dce_panel_cntl->mask->field_name -static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *dce_panel_cntl) +static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) { uint64_t current_backlight; uint32_t round_result; uint32_t pwm_period_cntl, bl_period, bl_int_count; uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en; uint32_t bl_period_mask, bl_pwm_mask; + struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl); pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL); REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period); @@ -75,7 +76,7 @@ static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *d else bl_pwm &= 0xFFFF; - current_backlight = bl_pwm << (1 + bl_int_count); + current_backlight = (uint64_t)bl_pwm << (1 + bl_int_count); if (bl_period == 0) bl_period = 0xFFFF; @@ -150,7 +151,7 @@ static uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl) REG_UPDATE(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, 0); - current_backlight = calculate_16_bit_backlight_from_pwm(dce_panel_cntl); + current_backlight = dce_get_16_bit_backlight_from_pwm(panel_cntl); return current_backlight; } @@ -273,6 +274,7 @@ static const struct panel_cntl_funcs dce_link_panel_cntl_funcs = { .is_panel_powered_on = dce_is_panel_powered_on, .store_backlight_level = dce_store_backlight_level, .driver_set_backlight = dce_driver_set_backlight, + .get_current_backlight = dce_get_16_bit_backlight_from_pwm, }; void dce_panel_cntl_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h index 70ec691e14d2d4e07c40b19f7fb6da23e84f7535..99c68ca9c7e00e9f4b93e9a313b3ca6bcf728b9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h @@ -49,7 +49,7 @@ #define DCN_PANEL_CNTL_REG_LIST()\ DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \ DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \ - DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \ + DCN_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \ SR(BL_PWM_CNTL), \ SR(BL_PWM_CNTL2), \ SR(BL_PWM_PERIOD_CNTL), \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c index ab63d0d0304cb1012164edf8017b1d89aad638e6..2a32b66959ba29377d0ae31444915d6f29e0ec25 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c 
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c @@ -146,6 +146,33 @@ static bool setup_scaling_configuration( return true; } +#if defined(CONFIG_DRM_AMD_DC_SI) +static bool dce60_setup_scaling_configuration( + struct dce_transform *xfm_dce, + const struct scaler_data *data) +{ + REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0); + + if (data->taps.h_taps + data->taps.v_taps <= 2) { + /* Set bypass */ + + /* DCE6 has no SCL_MODE register, skip scale mode programming */ + + return false; + } + + REG_SET_2(SCL_TAP_CONTROL, 0, + SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1, + SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1); + + /* DCE6 has no SCL_MODE register, skip scale mode programming */ + + /* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */ + + return true; +} +#endif + static void program_overscan( struct dce_transform *xfm_dce, const struct scaler_data *data) @@ -279,6 +306,36 @@ static void calculate_inits( inits->v_init.fraction = dc_fixpt_u0d19(v_init) << 5; } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_calculate_inits( + struct dce_transform *xfm_dce, + const struct scaler_data *data, + struct sclh_ratios_inits *inits) +{ + struct fixed31_32 v_init; + + inits->h_int_scale_ratio = + dc_fixpt_u2d19(data->ratios.horz) << 5; + inits->v_int_scale_ratio = + dc_fixpt_u2d19(data->ratios.vert) << 5; + + /* DCE6 h_init_luma setting inspired by DCE110 */ + inits->h_init_luma.integer = 1; + + /* DCE6 h_init_chroma setting inspired by DCE110 */ + inits->h_init_chroma.integer = 1; + + v_init = + dc_fixpt_div_int( + dc_fixpt_add( + data->ratios.vert, + dc_fixpt_from_int(data->taps.v_taps + 1)), + 2); + inits->v_init.integer = dc_fixpt_floor(v_init); + inits->v_init.fraction = dc_fixpt_u0d19(v_init) << 5; +} +#endif + static void program_scl_ratios_inits( struct dce_transform *xfm_dce, struct scl_ratios_inits *inits) @@ -301,6 +358,36 @@ static void program_scl_ratios_inits( REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0); } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_program_scl_ratios_inits( + struct dce_transform *xfm_dce, + struct sclh_ratios_inits *inits) +{ + + REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0, + SCL_H_SCALE_RATIO, inits->h_int_scale_ratio); + + REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0, + SCL_V_SCALE_RATIO, inits->v_int_scale_ratio); + + /* DCE6 has SCL_HORZ_FILTER_INIT_RGB_LUMA register */ + REG_SET_2(SCL_HORZ_FILTER_INIT_RGB_LUMA, 0, + SCL_H_INIT_INT_RGB_Y, inits->h_init_luma.integer, + SCL_H_INIT_FRAC_RGB_Y, inits->h_init_luma.fraction); + + /* DCE6 has SCL_HORZ_FILTER_INIT_CHROMA register */ + REG_SET_2(SCL_HORZ_FILTER_INIT_CHROMA, 0, + SCL_H_INIT_INT_CBCR, inits->h_init_chroma.integer, + SCL_H_INIT_FRAC_CBCR, inits->h_init_chroma.fraction); + + REG_SET_2(SCL_VERT_FILTER_INIT, 0, + SCL_V_INIT_INT, inits->v_init.integer, + SCL_V_INIT_FRAC, inits->v_init.fraction); + + REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0); +} +#endif + static const uint16_t *get_filter_coeffs_16p(int taps, struct fixed31_32 ratio) { if (taps == 4) @@ -399,6 +486,91 @@ static void dce_transform_set_scaler( REG_UPDATE(LB_DATA_FORMAT, ALPHA_EN, data->lb_params.alpha_en); } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_transform_set_scaler( + struct transform *xfm, + const struct scaler_data *data) +{ + struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); + bool is_scaling_required; + bool filter_updated = false; + const uint16_t *coeffs_v, *coeffs_h; + + /*Use whole line buffer memory always*/ + REG_SET(DC_LB_MEMORY_SPLIT, 0, + DC_LB_MEMORY_CONFIG, 0); + + 
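+ /* Orientation note (sketch, not part of the original patch): the sequence
+ * below runs overscan -> taps -> ratios/inits -> filter coefficients ->
+ * viewport. The bypass decision in dce60_setup_scaling_configuration()
+ * reduces to the predicate:
+ *
+ *	bool scaling_required = data->taps.h_taps + data->taps.v_taps > 2;
+ *
+ * i.e. anything beyond one tap per direction needs the scaler. */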
REG_SET(DC_LB_MEM_SIZE, 0, + DC_LB_MEM_SIZE, xfm_dce->lb_memory_size); + + /* Clear SCL_F_SHARP_CONTROL value to 0 */ + REG_WRITE(SCL_F_SHARP_CONTROL, 0); + + /* 1. Program overscan */ + program_overscan(xfm_dce, data); + + /* 2. Program taps and configuration */ + is_scaling_required = dce60_setup_scaling_configuration(xfm_dce, data); + + if (is_scaling_required) { + /* 3. Calculate and program ratio, DCE6 filter initialization */ + struct sclh_ratios_inits inits = { 0 }; + + /* DCE6 has specific calculate_inits() function */ + dce60_calculate_inits(xfm_dce, data, &inits); + + /* DCE6 has specific program_scl_ratios_inits() function */ + dce60_program_scl_ratios_inits(xfm_dce, &inits); + + coeffs_v = get_filter_coeffs_16p(data->taps.v_taps, data->ratios.vert); + coeffs_h = get_filter_coeffs_16p(data->taps.h_taps, data->ratios.horz); + + if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) { + /* 4. Program vertical filters */ + if (xfm_dce->filter_v == NULL) + REG_SET(SCL_VERT_FILTER_CONTROL, 0, + SCL_V_2TAP_HARDCODE_COEF_EN, 0); + program_multi_taps_filter( + xfm_dce, + data->taps.v_taps, + coeffs_v, + FILTER_TYPE_RGB_Y_VERTICAL); + program_multi_taps_filter( + xfm_dce, + data->taps.v_taps, + coeffs_v, + FILTER_TYPE_ALPHA_VERTICAL); + + /* 5. Program horizontal filters */ + if (xfm_dce->filter_h == NULL) + REG_SET(SCL_HORZ_FILTER_CONTROL, 0, + SCL_H_2TAP_HARDCODE_COEF_EN, 0); + program_multi_taps_filter( + xfm_dce, + data->taps.h_taps, + coeffs_h, + FILTER_TYPE_RGB_Y_HORIZONTAL); + program_multi_taps_filter( + xfm_dce, + data->taps.h_taps, + coeffs_h, + FILTER_TYPE_ALPHA_HORIZONTAL); + + xfm_dce->filter_v = coeffs_v; + xfm_dce->filter_h = coeffs_h; + filter_updated = true; + } + } + + /* 6. Program the viewport */ + program_viewport(xfm_dce, &data->viewport); + + /* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */ + + /* DCE6 DATA_FORMAT register does not support ALPHA_EN */ +} +#endif + /***************************************************************************** * set_clamp * @@ -664,6 +836,67 @@ static void program_bit_depth_reduction( bit_depth_params->flags.HIGHPASS_RANDOM); } +#if defined(CONFIG_DRM_AMD_DC_SI) +/***************************************************************************** + * dce60_transform_bit_depth_reduction program + * + * @brief + * Programs the DCP bit depth reduction registers (Clamp, Round/Truncate, + * Dither) for dce + * + * @param depth : bit depth to set the clamp to (should match denorm) + * + ******************************************************************************/ +static void dce60_program_bit_depth_reduction( + struct dce_transform *xfm_dce, + enum dc_color_depth depth, + const struct bit_depth_reduction_params *bit_depth_params) +{ + enum dcp_out_trunc_round_depth trunc_round_depth; + enum dcp_out_trunc_round_mode trunc_mode; + bool spatial_dither_enable; + + ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */ + + spatial_dither_enable = bit_depth_params->flags.SPATIAL_DITHER_ENABLED; + /* Default to 12 bit truncation without rounding */ + trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_12BIT; + trunc_mode = DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE; + + if (bit_depth_params->flags.TRUNCATE_ENABLED) { + /* Don't enable dithering if truncation is enabled */ + spatial_dither_enable = false; + trunc_mode = bit_depth_params->flags.TRUNCATE_MODE ? 
+ DCP_OUT_TRUNC_ROUND_MODE_ROUND : + DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE; + + if (bit_depth_params->flags.TRUNCATE_DEPTH == 0 || + bit_depth_params->flags.TRUNCATE_DEPTH == 1) + trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_8BIT; + else if (bit_depth_params->flags.TRUNCATE_DEPTH == 2) + trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_10BIT; + else { + /* + * Invalid truncate/round depth. Setting here to 12bit + * to prevent use-before-initialize errors. + */ + trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_12BIT; + BREAK_TO_DEBUGGER(); + } + } + + /* DCE6 has no OUT_CLAMP_CONTROL_* registers - set_clamp() is skipped */ + set_round(xfm_dce, trunc_mode, trunc_round_depth); + set_dither(xfm_dce, + spatial_dither_enable, + DCP_SPATIAL_DITHER_MODE_A_AA_A, + DCP_SPATIAL_DITHER_DEPTH_30BPP, + bit_depth_params->flags.FRAME_RANDOM, + bit_depth_params->flags.RGB_RANDOM, + bit_depth_params->flags.HIGHPASS_RANDOM); +} +#endif + static int dce_transform_get_max_num_of_supported_lines( struct dce_transform *xfm_dce, enum lb_pixel_depth depth, @@ -797,6 +1030,59 @@ static void dce_transform_set_pixel_storage_depth( } } +#if defined(CONFIG_DRM_AMD_DC_SI) +static void dce60_transform_set_pixel_storage_depth( + struct transform *xfm, + enum lb_pixel_depth depth, + const struct bit_depth_reduction_params *bit_depth_params) +{ + struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); + int pixel_depth, expan_mode; + enum dc_color_depth color_depth; + + switch (depth) { + case LB_PIXEL_DEPTH_18BPP: + color_depth = COLOR_DEPTH_666; + pixel_depth = 2; + expan_mode = 1; + break; + case LB_PIXEL_DEPTH_24BPP: + color_depth = COLOR_DEPTH_888; + pixel_depth = 1; + expan_mode = 1; + break; + case LB_PIXEL_DEPTH_30BPP: + color_depth = COLOR_DEPTH_101010; + pixel_depth = 0; + expan_mode = 1; + break; + case LB_PIXEL_DEPTH_36BPP: + color_depth = COLOR_DEPTH_121212; + pixel_depth = 3; + expan_mode = 0; + break; + default: + color_depth = COLOR_DEPTH_101010; + pixel_depth = 0; + expan_mode = 1; + BREAK_TO_DEBUGGER(); + break; + } + + set_denormalization(xfm_dce, color_depth); + dce60_program_bit_depth_reduction(xfm_dce, color_depth, bit_depth_params); + + /* DATA_FORMAT in DCE6 does not have PIXEL_DEPTH and PIXEL_EXPAN_MODE masks */ + + if (!(xfm_dce->lb_pixel_depth_supported & depth)) { + /*we should use unsupported capabilities + * unless it is required by w/a*/ + DC_LOG_WARNING("%s: Capability not supported", + __func__); + } +} +#endif + static void program_gamut_remap( struct dce_transform *xfm_dce, const uint16_t *reg_val) @@ -1335,6 +1621,21 @@ static const struct transform_funcs dce_transform_funcs = { .transform_get_optimal_number_of_taps = dce_transform_get_optimal_number_of_taps }; +#if defined(CONFIG_DRM_AMD_DC_SI) +static const struct transform_funcs dce60_transform_funcs = { + .transform_reset = dce_transform_reset, + .transform_set_scaler = dce60_transform_set_scaler, + .transform_set_gamut_remap = dce_transform_set_gamut_remap, + .opp_set_csc_adjustment = dce110_opp_set_csc_adjustment, + .opp_set_csc_default = dce110_opp_set_csc_default, + .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut, + .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl, + .opp_set_regamma_mode = dce110_opp_set_regamma_mode, + .transform_set_pixel_storage_depth = dce60_transform_set_pixel_storage_depth, + .transform_get_optimal_number_of_taps = dce_transform_get_optimal_number_of_taps +}; +#endif + /*****************************************/ /* Constructor, Destructor */ /*****************************************/ @@ -1365,3 
+1666,32 @@ void dce_transform_construct( xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY; xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/ } + +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_transform_construct( + struct dce_transform *xfm_dce, + struct dc_context *ctx, + uint32_t inst, + const struct dce_transform_registers *regs, + const struct dce_transform_shift *xfm_shift, + const struct dce_transform_mask *xfm_mask) +{ + xfm_dce->base.ctx = ctx; + + xfm_dce->base.inst = inst; + xfm_dce->base.funcs = &dce60_transform_funcs; + + xfm_dce->regs = regs; + xfm_dce->xfm_shift = xfm_shift; + xfm_dce->xfm_mask = xfm_mask; + + xfm_dce->prescaler_on = true; + xfm_dce->lb_pixel_depth_supported = + LB_PIXEL_DEPTH_18BPP | + LB_PIXEL_DEPTH_24BPP | + LB_PIXEL_DEPTH_30BPP; + + xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY; + xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/ +} +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h index 948281d8b6afd514b76552b6e5c7bef6e41e2aa8..cbce194ec7b82b1452a8b888f4ca80e46088ee32 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h @@ -108,6 +108,68 @@ SRI(DCFE_MEM_PWR_CTRL, DCFE, id), \ SRI(DCFE_MEM_PWR_STATUS, DCFE, id) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define XFM_COMMON_REG_LIST_DCE60_BASE(id) \ + SRI(DATA_FORMAT, LB, id), \ + SRI(GAMUT_REMAP_CONTROL, DCP, id), \ + SRI(GAMUT_REMAP_C11_C12, DCP, id), \ + SRI(GAMUT_REMAP_C13_C14, DCP, id), \ + SRI(GAMUT_REMAP_C21_C22, DCP, id), \ + SRI(GAMUT_REMAP_C23_C24, DCP, id), \ + SRI(GAMUT_REMAP_C31_C32, DCP, id), \ + SRI(GAMUT_REMAP_C33_C34, DCP, id), \ + SRI(OUTPUT_CSC_C11_C12, DCP, id), \ + SRI(OUTPUT_CSC_C13_C14, DCP, id), \ + SRI(OUTPUT_CSC_C21_C22, DCP, id), \ + SRI(OUTPUT_CSC_C23_C24, DCP, id), \ + SRI(OUTPUT_CSC_C31_C32, DCP, id), \ + SRI(OUTPUT_CSC_C33_C34, DCP, id), \ + SRI(OUTPUT_CSC_CONTROL, DCP, id), \ + SRI(REGAMMA_CNTLA_START_CNTL, DCP, id), \ + SRI(REGAMMA_CNTLA_SLOPE_CNTL, DCP, id), \ + SRI(REGAMMA_CNTLA_END_CNTL1, DCP, id), \ + SRI(REGAMMA_CNTLA_END_CNTL2, DCP, id), \ + SRI(REGAMMA_CNTLA_REGION_0_1, DCP, id), \ + SRI(REGAMMA_CNTLA_REGION_2_3, DCP, id), \ + SRI(REGAMMA_CNTLA_REGION_4_5, DCP, id), \ + SRI(REGAMMA_CNTLA_REGION_6_7, DCP, id), \ + SRI(REGAMMA_CNTLA_REGION_8_9, DCP, id), \ + SRI(REGAMMA_CNTLA_REGION_10_11, DCP, id), \ + SRI(REGAMMA_CNTLA_REGION_12_13, DCP, id), \ + SRI(REGAMMA_CNTLA_REGION_14_15, DCP, id), \ + SRI(REGAMMA_LUT_WRITE_EN_MASK, DCP, id), \ + SRI(REGAMMA_LUT_INDEX, DCP, id), \ + SRI(REGAMMA_LUT_DATA, DCP, id), \ + SRI(REGAMMA_CONTROL, DCP, id), \ + SRI(DENORM_CONTROL, DCP, id), \ + SRI(DCP_SPATIAL_DITHER_CNTL, DCP, id), \ + SRI(OUT_ROUND_CONTROL, DCP, id), \ + SRI(SCL_TAP_CONTROL, SCL, id), \ + SRI(SCL_CONTROL, SCL, id), \ + SRI(SCL_BYPASS_CONTROL, SCL, id), \ + SRI(EXT_OVERSCAN_LEFT_RIGHT, SCL, id), \ + SRI(EXT_OVERSCAN_TOP_BOTTOM, SCL, id), \ + SRI(SCL_VERT_FILTER_CONTROL, SCL, id), \ + SRI(SCL_HORZ_FILTER_CONTROL, SCL, id), \ + SRI(SCL_COEF_RAM_SELECT, SCL, id), \ + SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \ + SRI(VIEWPORT_START, SCL, id), \ + SRI(VIEWPORT_SIZE, SCL, id), \ + SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \ + SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \ + SRI(SCL_VERT_FILTER_INIT, SCL, id), \ + SRI(SCL_AUTOMATIC_MODE_CONTROL, SCL, id), \ + SRI(DC_LB_MEMORY_SPLIT, LB, id), \ + SRI(DC_LB_MEM_SIZE, LB, id), \ + SRI(DCFE_MEM_LIGHT_SLEEP_CNTL, CRTC, id), \ + SRI(SCL_UPDATE, SCL, id), \ + 
SRI(SCL_F_SHARP_CONTROL, SCL, id) + +#define XFM_COMMON_REG_LIST_DCE60(id) \ + XFM_COMMON_REG_LIST_DCE60_BASE(id), \ + SRI(DCFE_MEM_LIGHT_SLEEP_CNTL, CRTC, id) +#endif + #define XFM_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -204,6 +266,83 @@ XFM_SF(DCFE_MEM_PWR_STATUS, DCP_REGAMMA_MEM_PWR_STATE, mask_sh),\ XFM_SF(SCL_MODE, SCL_PSCL_EN, mask_sh) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define XFM_COMMON_MASK_SH_LIST_DCE60(mask_sh) \ + XFM_COMMON_MASK_SH_LIST_DCE60_COMMON_BASE(mask_sh), \ + OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_LIGHT_SLEEP_DIS, mask_sh),\ + OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, DCP_LUT_LIGHT_SLEEP_DIS, mask_sh),\ + OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_MEM_PWR_STATE, mask_sh) + +#define XFM_COMMON_MASK_SH_LIST_DCE60_COMMON_BASE(mask_sh) \ + XFM_SF(OUT_ROUND_CONTROL, OUT_ROUND_TRUNC_MODE, mask_sh), \ + XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_EN, mask_sh), \ + XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_MODE, mask_sh), \ + XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_DEPTH, mask_sh), \ + XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_FRAME_RANDOM_ENABLE, mask_sh), \ + XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_RGB_RANDOM_ENABLE, mask_sh), \ + XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_HIGHPASS_RANDOM_ENABLE, mask_sh), \ + XFM_SF(DENORM_CONTROL, DENORM_MODE, mask_sh), \ + XFM_SF(DATA_FORMAT, INTERLEAVE_EN, mask_sh), \ + XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C11, mask_sh), \ + XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C12, mask_sh), \ + XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C13, mask_sh), \ + XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C14, mask_sh), \ + XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C21, mask_sh), \ + XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C22, mask_sh), \ + XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C23, mask_sh), \ + XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C24, mask_sh), \ + XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C31, mask_sh), \ + XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C32, mask_sh), \ + XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C33, mask_sh), \ + XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C34, mask_sh), \ + XFM_SF(GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, mask_sh), \ + XFM_SF(OUTPUT_CSC_C11_C12, OUTPUT_CSC_C11, mask_sh),\ + XFM_SF(OUTPUT_CSC_C11_C12, OUTPUT_CSC_C12, mask_sh),\ + XFM_SF(OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, mask_sh),\ + XFM_SF(REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START, mask_sh),\ + XFM_SF(REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, mask_sh),\ + XFM_SF(REGAMMA_CNTLA_SLOPE_CNTL, REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, mask_sh),\ + XFM_SF(REGAMMA_CNTLA_END_CNTL1, REGAMMA_CNTLA_EXP_REGION_END, mask_sh),\ + XFM_SF(REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_BASE, mask_sh),\ + XFM_SF(REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_SLOPE, mask_sh),\ + XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + XFM_SF(REGAMMA_LUT_WRITE_EN_MASK, REGAMMA_LUT_WRITE_EN_MASK, mask_sh),\ + XFM_SF(REGAMMA_CONTROL, GRPH_REGAMMA_MODE, mask_sh),\ + XFM_SF(SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \ + XFM_SF(SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \ + XFM_SF(SCL_BYPASS_CONTROL, SCL_BYPASS_MODE, mask_sh), \ + XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh), \ + 
XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh), \ + XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh), \ + XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh), \ + XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE, mask_sh), \ + XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_PHASE, mask_sh), \ + XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX, mask_sh), \ + XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF_EN, mask_sh), \ + XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF, mask_sh), \ + XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF_EN, mask_sh), \ + XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF, mask_sh), \ + XFM_SF(VIEWPORT_START, VIEWPORT_X_START, mask_sh), \ + XFM_SF(VIEWPORT_START, VIEWPORT_Y_START, mask_sh), \ + XFM_SF(VIEWPORT_SIZE, VIEWPORT_HEIGHT, mask_sh), \ + XFM_SF(VIEWPORT_SIZE, VIEWPORT_WIDTH, mask_sh), \ + XFM_SF(SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh), \ + XFM_SF(SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh), \ + XFM_SF(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL_H_INIT_INT_RGB_Y, mask_sh), \ + XFM_SF(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL_H_INIT_FRAC_RGB_Y, mask_sh), \ + XFM_SF(SCL_HORZ_FILTER_INIT_CHROMA, SCL_H_INIT_INT_CBCR, mask_sh), \ + XFM_SF(SCL_HORZ_FILTER_INIT_CHROMA, SCL_H_INIT_FRAC_CBCR, mask_sh), \ + XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh), \ + XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh), \ + XFM_SF(SCL_HORZ_FILTER_CONTROL, SCL_H_FILTER_PICK_NEAREST, mask_sh), \ + XFM_SF(SCL_VERT_FILTER_CONTROL, SCL_V_FILTER_PICK_NEAREST, mask_sh), \ + XFM_SF(DC_LB_MEMORY_SPLIT, DC_LB_MEMORY_CONFIG, mask_sh), \ + XFM_SF(DC_LB_MEM_SIZE, DC_LB_MEM_SIZE, mask_sh) +#endif + #define XFM_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh) \ XFM_SF(DCP0_OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MIN_B_CB, mask_sh), \ XFM_SF(DCP0_OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MAX_B_CB, mask_sh), \ @@ -302,6 +441,7 @@ type DCP_RGB_RANDOM_ENABLE; \ type DCP_HIGHPASS_RANDOM_ENABLE; \ type DENORM_MODE; \ + type INTERLEAVE_EN; \ type PIXEL_DEPTH; \ type PIXEL_EXPAN_MODE; \ type GAMUT_REMAP_C11; \ @@ -365,12 +505,20 @@ type SCL_V_SCALE_RATIO; \ type SCL_H_INIT_INT; \ type SCL_H_INIT_FRAC; \ + type SCL_H_INIT_INT_RGB_Y; \ + type SCL_H_INIT_FRAC_RGB_Y; \ + type SCL_H_INIT_INT_CBCR; \ + type SCL_H_INIT_FRAC_CBCR; \ type SCL_V_INIT_INT; \ type SCL_V_INIT_FRAC; \ + type DC_LB_MEMORY_CONFIG; \ + type DC_LB_MEM_SIZE; \ type LB_MEMORY_CONFIG; \ type LB_MEMORY_SIZE; \ type SCL_V_2TAP_HARDCODE_COEF_EN; \ type SCL_H_2TAP_HARDCODE_COEF_EN; \ + type SCL_V_FILTER_PICK_NEAREST; \ + type SCL_H_FILTER_PICK_NEAREST; \ type SCL_COEF_UPDATE_COMPLETE; \ type ALPHA_EN @@ -383,6 +531,9 @@ struct dce_transform_mask { }; struct dce_transform_registers { +#if defined(CONFIG_DRM_AMD_DC_SI) + uint32_t DATA_FORMAT; +#endif uint32_t LB_DATA_FORMAT; uint32_t GAMUT_REMAP_CONTROL; uint32_t GAMUT_REMAP_C11_C12; @@ -438,8 +589,16 @@ struct dce_transform_registers { uint32_t SCL_HORZ_FILTER_SCALE_RATIO; uint32_t SCL_VERT_FILTER_SCALE_RATIO; uint32_t SCL_HORZ_FILTER_INIT; +#if defined(CONFIG_DRM_AMD_DC_SI) + uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA; + uint32_t SCL_HORZ_FILTER_INIT_CHROMA; +#endif uint32_t SCL_VERT_FILTER_INIT; uint32_t SCL_AUTOMATIC_MODE_CONTROL; +#if defined(CONFIG_DRM_AMD_DC_SI) + uint32_t DC_LB_MEMORY_SPLIT; + uint32_t DC_LB_MEM_SIZE; +#endif uint32_t LB_MEMORY_CTRL; uint32_t SCL_UPDATE; uint32_t SCL_F_SHARP_CONTROL; @@ -457,6 +616,16 @@ struct scl_ratios_inits { struct init_int_and_frac v_init; }; +#if defined(CONFIG_DRM_AMD_DC_SI) +struct sclh_ratios_inits 
{ + uint32_t h_int_scale_ratio; + uint32_t v_int_scale_ratio; + struct init_int_and_frac h_init_luma; + struct init_int_and_frac h_init_chroma; + struct init_int_and_frac v_init; +}; +#endif + enum ram_filter_type { FILTER_TYPE_RGB_Y_VERTICAL = 0, /* 0 - RGB/Y Vertical filter */ FILTER_TYPE_CBCR_VERTICAL = 1, /* 1 - CbCr Vertical filter */ @@ -489,6 +658,15 @@ void dce_transform_construct(struct dce_transform *xfm_dce, const struct dce_transform_shift *xfm_shift, const struct dce_transform_mask *xfm_mask); +#if defined(CONFIG_DRM_AMD_DC_SI) +void dce60_transform_construct(struct dce_transform *xfm_dce, + struct dc_context *ctx, + uint32_t inst, + const struct dce_transform_registers *regs, + const struct dce_transform_shift *xfm_shift, + const struct dce_transform_mask *xfm_mask); +#endif + bool dce_transform_get_optimal_number_of_taps( struct transform *xfm, struct scaler_data *scl_data, diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 49380ed3aeae14ba96839ca99ea1a4f0971d3477..0603ddca7bd06ff0ccd84d8cd7e0e38d2cb9a116 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -720,6 +720,7 @@ void dce110_edp_wait_for_hpd_ready( struct dc_context *ctx = link->ctx; struct graphics_object_id connector = link->link_enc->connector; struct gpio *hpd; + struct dc_sink *sink = link->local_sink; bool edp_hpd_high = false; uint32_t time_elapsed = 0; uint32_t timeout = power_up ? @@ -752,6 +753,14 @@ void dce110_edp_wait_for_hpd_ready( return; } + if (sink != NULL) { + if (sink->edid_caps.panel_patch.extra_t3_ms > 0) { + int extra_t3_in_ms = sink->edid_caps.panel_patch.extra_t3_ms; + + msleep(extra_t3_in_ms); + } + } + dal_gpio_open(hpd, GPIO_MODE_INTERRUPT); /* wait until timeout or panel detected */ @@ -842,6 +851,17 @@ void dce110_edp_power_control( cntl.coherent = false; cntl.lanes_number = LANE_COUNT_FOUR; cntl.hpd_sel = link->link_enc->hpd_source; + + if (ctx->dc->ctx->dmub_srv && + ctx->dc->debug.dmub_command_table) { + if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) + bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, + LVTMA_CONTROL_POWER_ON); + else + bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, + LVTMA_CONTROL_POWER_OFF); + } + bp_result = link_transmitter_control(ctx->dc_bios, &cntl); if (!power_up) @@ -919,8 +939,21 @@ void dce110_edp_backlight_control( /*edp 1.2*/ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) edp_receiver_ready_T7(link); + + if (ctx->dc->ctx->dmub_srv && + ctx->dc->debug.dmub_command_table) { + if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) + ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, + LVTMA_CONTROL_LCD_BLON); + else + ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, + LVTMA_CONTROL_LCD_BLOFF); + } + link_transmitter_control(ctx->dc_bios, &cntl); + + if (enable && link->dpcd_sink_ext_caps.bits.oled) msleep(OLED_POST_T7_DELAY); diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 5d83e8174005e6d66de6a699eb1313b354850f50..0853bc9917c75e6adca187fbde7c843fedb1ad0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -1017,7 +1017,7 @@ enum dc_status dce112_add_stream_to_ctx( struct dc_state *new_ctx, struct dc_stream_state *dc_stream) { - enum dc_status result = 
DC_ERROR_UNEXPECTED; + enum dc_status result; result = resource_map_pool_resources(dc, new_ctx, dc_stream); diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..7036c3bd0f871d8689af6a0a3b58cf247f9214f9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile @@ -0,0 +1,34 @@ +# +# Copyright 2020 Mauro Rossi +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# +# Makefile for the 'controller' sub-component of DAL. +# It provides the control and status of HW CRTC block. + +DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \ + dce60_resource.o + +AMD_DAL_DCE60 = $(addprefix $(AMDDALPATH)/dc/dce60/,$(DCE60)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DCE60) + + + diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c new file mode 100644 index 0000000000000000000000000000000000000000..920c7ae29d5301fc886a4adea9b90320845f14eb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c @@ -0,0 +1,432 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dc.h" +#include "core_types.h" +#include "dce60_hw_sequencer.h" + +#include "dce/dce_hwseq.h" +#include "dce110/dce110_hw_sequencer.h" +#include "dce100/dce100_hw_sequencer.h" + +/* include DCE6 register header files */ +#include "dce/dce_6_0_d.h" +#include "dce/dce_6_0_sh_mask.h" + +#define DC_LOGGER_INIT() + +/******************************************************************************* + * Private definitions + ******************************************************************************/ + +/***************************PIPE_CONTROL***********************************/ + +/* + * Check if FBC can be enabled + */ +static bool dce60_should_enable_fbc(struct dc *dc, + struct dc_state *context, + uint32_t *pipe_idx) +{ + uint32_t i; + struct pipe_ctx *pipe_ctx = NULL; + struct resource_context *res_ctx = &context->res_ctx; + unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; + + + ASSERT(dc->fbc_compressor); + + /* FBC memory should be allocated */ + if (!dc->ctx->fbc_gpu_addr) + return false; + + /* Only supports single display */ + if (context->stream_count != 1) + return false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (res_ctx->pipe_ctx[i].stream) { + + pipe_ctx = &res_ctx->pipe_ctx[i]; + + if (!pipe_ctx) + continue; + + /* fbc not applicable on underlay pipe */ + if (pipe_ctx->pipe_idx != underlay_idx) { + *pipe_idx = i; + break; + } + } + } + + if (i == dc->res_pool->pipe_count) + return false; + + if (!pipe_ctx->stream->link) + return false; + + /* Only supports eDP */ + if (pipe_ctx->stream->link->connector_signal != SIGNAL_TYPE_EDP) + return false; + + /* PSR should not be enabled */ + if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled) + return false; + + /* Nothing to compress */ + if (!pipe_ctx->plane_state) + return false; + + /* Only for non-linear tiling */ + if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL) + return false; + + return true; +} + +/* + * Enable FBC + */ +static void dce60_enable_fbc( + struct dc *dc, + struct dc_state *context) +{ + uint32_t pipe_idx = 0; + + if (dce60_should_enable_fbc(dc, context, &pipe_idx)) { + /* Program GRPH COMPRESSED ADDRESS and PITCH */ + struct compr_addr_and_pitch_params params = {0, 0, 0}; + struct compressor *compr = dc->fbc_compressor; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; + + params.source_view_width = pipe_ctx->stream->timing.h_addressable; + params.source_view_height = pipe_ctx->stream->timing.v_addressable; + params.inst = pipe_ctx->stream_res.tg->inst; + compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr; + + compr->funcs->surface_address_and_pitch(compr, ¶ms); + compr->funcs->set_fbc_invalidation_triggers(compr, 1); + + compr->funcs->enable_fbc(compr, ¶ms); + } +} + + +/******************************************************************************* + * Front End programming + ******************************************************************************/ + +static void dce60_set_default_colors(struct pipe_ctx *pipe_ctx) +{ + struct default_adjustment default_adjust = { 0 }; + + default_adjust.force_hw_default = false; + default_adjust.in_color_space = pipe_ctx->plane_state->color_space; + default_adjust.out_color_space = pipe_ctx->stream->output_color_space; + default_adjust.csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW; + default_adjust.surface_pixel_format = pipe_ctx->plane_res.scl_data.format; + + /* display color depth */ + 
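+	/* note: lb_color_depth below mirrors the line-buffer depth picked
+	 * during scaler setup (plane_res.scl_data) */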
default_adjust.color_depth =
+			pipe_ctx->stream->timing.display_color_depth;
+
+	/* Lb color depth */
+	default_adjust.lb_color_depth = pipe_ctx->plane_res.scl_data.lb_params.depth;
+
+	pipe_ctx->plane_res.xfm->funcs->opp_set_csc_default(
+					pipe_ctx->plane_res.xfm, &default_adjust);
+}
+
+/*******************************************************************************
+ * In order to turn on surface we will program
+ * CRTC
+ *
+ * DCE6 has no bottom_pipe and no Blender HW
+ * We need to set 'blank_target' to false in order to turn on the display
+ *
+ * |-----------|------------|---------|
+ * |curr pipe  | set_blank  |         |
+ * |Surface    |blank_target|  CRTC   |
+ * |visibility |  argument  |         |
+ * |-----------|------------|---------|
+ * |    off    |   true     | blank   |
+ * |    on     |  false     | unblank |
+ * |-----------|------------|---------|
+ *
+ ******************************************************************************/
+static void dce60_program_surface_visibility(const struct dc *dc,
+		struct pipe_ctx *pipe_ctx)
+{
+	bool blank_target = false;
+
+	/* DCE6 has no bottom_pipe and no Blender HW */
+
+	if (!pipe_ctx->plane_state->visible)
+		blank_target = true;
+
+	/* DCE6 skips dce_set_blender_mode() but then proceeds to 'unblank' the CRTC */
+	pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, blank_target);
+
+}
+
+
+static void dce60_get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
+		struct tg_color *color)
+{
+	uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->stream_res.tg->inst) / 4;
+
+	switch (pipe_ctx->plane_res.scl_data.format) {
+	case PIXEL_FORMAT_ARGB8888:
+		/* set border color to red */
+		color->color_r_cr = color_value;
+		break;
+
+	case PIXEL_FORMAT_ARGB2101010:
+		/* set border color to blue */
+		color->color_b_cb = color_value;
+		break;
+	case PIXEL_FORMAT_420BPP8:
+		/* set border color to green */
+		color->color_g_y = color_value;
+		break;
+	case PIXEL_FORMAT_420BPP10:
+		/* set border color to yellow */
+		color->color_g_y = color_value;
+		color->color_r_cr = color_value;
+		break;
+	case PIXEL_FORMAT_FP16:
+		/* set border color to white */
+		color->color_r_cr = color_value;
+		color->color_b_cb = color_value;
+		color->color_g_y = color_value;
+		break;
+	default:
+		break;
+	}
+}
+
+static void dce60_program_scaler(const struct dc *dc,
+		const struct pipe_ctx *pipe_ctx)
+{
+	struct tg_color color = {0};
+
+	/* DCE6 skips the DCN TOFPGA check for transform_set_pixel_storage_depth == NULL */
+
+	if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
+		dce60_get_surface_visual_confirm_color(pipe_ctx, &color);
+	else
+		color_space_to_black_color(dc,
+				pipe_ctx->stream->output_color_space,
+				&color);
+
+	pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
+		pipe_ctx->plane_res.xfm,
+		pipe_ctx->plane_res.scl_data.lb_params.depth,
+		&pipe_ctx->stream->bit_depth_params);
+
+	if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) {
+		/*
+		 * The way 420 is packed, 2 channels carry Y component, 1 channel
+		 * alternates between Cb and Cr, so both channels need the pixel
+		 * value for Y
+		 */
+		if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+			color.color_r_cr = color.color_g_y;
+
+		pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
+				pipe_ctx->stream_res.tg,
+				&color);
+	}
+
+	pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
+		&pipe_ctx->plane_res.scl_data);
+}
+
+static void
+dce60_program_front_end_for_pipe(
+		struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+	struct mem_input *mi =
pipe_ctx->plane_res.mi; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + struct xfm_grph_csc_adjustment adjust; + struct out_csc_color_matrix tbl_entry; + unsigned int i; + struct dce_hwseq *hws = dc->hwseq; + + DC_LOGGER_INIT(); + memset(&tbl_entry, 0, sizeof(tbl_entry)); + + memset(&adjust, 0, sizeof(adjust)); + adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + + dce_enable_fe_clock(dc->hwseq, mi->inst, true); + + dce60_set_default_colors(pipe_ctx); + if (pipe_ctx->stream->csc_color_matrix.enable_adjustment + == true) { + tbl_entry.color_space = + pipe_ctx->stream->output_color_space; + + for (i = 0; i < 12; i++) + tbl_entry.regval[i] = + pipe_ctx->stream->csc_color_matrix.matrix[i]; + + pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment + (pipe_ctx->plane_res.xfm, &tbl_entry); + } + + if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) { + adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + + for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) + adjust.temperature_matrix[i] = + pipe_ctx->stream->gamut_remap_matrix.matrix[i]; + } + + pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust); + + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; + + dce60_program_scaler(dc, pipe_ctx); + + mi->funcs->mem_input_program_surface_config( + mi, + plane_state->format, + &plane_state->tiling_info, + &plane_state->plane_size, + plane_state->rotation, + NULL, + false); + if (mi->funcs->set_blank) + mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible); + + if (dc->config.gpu_vm_support) + mi->funcs->mem_input_program_pte_vm( + pipe_ctx->plane_res.mi, + plane_state->format, + &plane_state->tiling_info, + plane_state->rotation); + + /* Moved programming gamma from dc to hwss */ + if (pipe_ctx->plane_state->update_flags.bits.full_update || + pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || + pipe_ctx->plane_state->update_flags.bits.gamma_change) + hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); + + if (pipe_ctx->plane_state->update_flags.bits.full_update) + hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); + + DC_LOG_SURFACE( + "Pipe:%d %p: addr hi:0x%x, " + "addr low:0x%x, " + "src: %d, %d, %d," + " %d; dst: %d, %d, %d, %d;" + "clip: %d, %d, %d, %d\n", + pipe_ctx->pipe_idx, + (void *) pipe_ctx->plane_state, + pipe_ctx->plane_state->address.grph.addr.high_part, + pipe_ctx->plane_state->address.grph.addr.low_part, + pipe_ctx->plane_state->src_rect.x, + pipe_ctx->plane_state->src_rect.y, + pipe_ctx->plane_state->src_rect.width, + pipe_ctx->plane_state->src_rect.height, + pipe_ctx->plane_state->dst_rect.x, + pipe_ctx->plane_state->dst_rect.y, + pipe_ctx->plane_state->dst_rect.width, + pipe_ctx->plane_state->dst_rect.height, + pipe_ctx->plane_state->clip_rect.x, + pipe_ctx->plane_state->clip_rect.y, + pipe_ctx->plane_state->clip_rect.width, + pipe_ctx->plane_state->clip_rect.height); + + DC_LOG_SURFACE( + "Pipe %d: width, height, x, y\n" + "viewport:%d, %d, %d, %d\n" + "recout: %d, %d, %d, %d\n", + pipe_ctx->pipe_idx, + pipe_ctx->plane_res.scl_data.viewport.width, + pipe_ctx->plane_res.scl_data.viewport.height, + pipe_ctx->plane_res.scl_data.viewport.x, + pipe_ctx->plane_res.scl_data.viewport.y, + pipe_ctx->plane_res.scl_data.recout.width, + pipe_ctx->plane_res.scl_data.recout.height, + pipe_ctx->plane_res.scl_data.recout.x, + pipe_ctx->plane_res.scl_data.recout.y); +} + +static void dce60_apply_ctx_for_surface( + struct dc *dc, + 
const struct dc_stream_state *stream, + int num_planes, + struct dc_state *context) +{ + int i; + + if (num_planes == 0) + return; + + if (dc->fbc_compressor) + dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream != stream) + continue; + + /* Need to allocate mem before program front end for Fiji */ + pipe_ctx->plane_res.mi->funcs->allocate_mem_input( + pipe_ctx->plane_res.mi, + pipe_ctx->stream->timing.h_total, + pipe_ctx->stream->timing.v_total, + pipe_ctx->stream->timing.pix_clk_100hz / 10, + context->stream_count); + + dce60_program_front_end_for_pipe(dc, pipe_ctx); + + dc->hwss.update_plane_addr(dc, pipe_ctx); + + dce60_program_surface_visibility(dc, pipe_ctx); + + } + + if (dc->fbc_compressor) + dce60_enable_fbc(dc, context); +} + +void dce60_hw_sequencer_construct(struct dc *dc) +{ + dce110_hw_sequencer_construct(dc); + + dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating; + dc->hwss.apply_ctx_for_surface = dce60_apply_ctx_for_surface; + dc->hwss.cursor_lock = dce60_pipe_control_lock; + dc->hwss.pipe_control_lock = dce60_pipe_control_lock; + dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; + dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h new file mode 100644 index 0000000000000000000000000000000000000000..f3b2d8b60d5b79b99a11086c67167ee6962bc41d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h @@ -0,0 +1,37 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCE60_H__ +#define __DC_HWSS_DCE60_H__ + +#include "core_types.h" +#include "hw_sequencer_private.h" + +struct dc; + +void dce60_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_HWSS_DCE60_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c new file mode 100644 index 0000000000000000000000000000000000000000..5a5a9cb77acbf33771084438898eebcd84375f89 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c @@ -0,0 +1,1527 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include + +#include "dce/dce_6_0_d.h" +#include "dce/dce_6_0_sh_mask.h" + +#include "dm_services.h" + +#include "link_encoder.h" +#include "stream_encoder.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "irq/dce60/irq_service_dce60.h" +#include "dce110/dce110_timing_generator.h" +#include "dce110/dce110_resource.h" +#include "dce60/dce60_timing_generator.h" +#include "dce/dce_mem_input.h" +#include "dce/dce_link_encoder.h" +#include "dce/dce_stream_encoder.h" +#include "dce/dce_ipp.h" +#include "dce/dce_transform.h" +#include "dce/dce_opp.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "dce60/dce60_hw_sequencer.h" +#include "dce100/dce100_resource.h" +#include "dce/dce_panel_cntl.h" + +#include "reg_helper.h" + +#include "dce/dce_dmcu.h" +#include "dce/dce_aux.h" +#include "dce/dce_abm.h" +#include "dce/dce_i2c.h" +/* TODO remove this include */ + +#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT +#include "gmc/gmc_6_0_d.h" +#include "gmc/gmc_6_0_sh_mask.h" +#endif + +#ifndef mmDP_DPHY_INTERNAL_CTRL +#define mmDP_DPHY_INTERNAL_CTRL 0x1CDE +#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE +#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x1FDE +#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x42DE +#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x45DE +#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x48DE +#define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4BDE +#endif + + +#ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_2 0x05CB + #define mmBIOS_SCRATCH_3 0x05CC + #define mmBIOS_SCRATCH_6 0x05CF +#endif + +#ifndef mmDP_DPHY_FAST_TRAINING + #define mmDP_DPHY_FAST_TRAINING 0x1CCE + #define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE + #define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE + #define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE + #define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE + 
#define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE + #define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE +#endif + + +#ifndef mmHPD_DC_HPD_CONTROL + #define mmHPD_DC_HPD_CONTROL 0x189A + #define mmHPD0_DC_HPD_CONTROL 0x189A + #define mmHPD1_DC_HPD_CONTROL 0x18A2 + #define mmHPD2_DC_HPD_CONTROL 0x18AA + #define mmHPD3_DC_HPD_CONTROL 0x18B2 + #define mmHPD4_DC_HPD_CONTROL 0x18BA + #define mmHPD5_DC_HPD_CONTROL 0x18C2 +#endif + +#define DCE11_DIG_FE_CNTL 0x4a00 +#define DCE11_DIG_BE_CNTL 0x4a47 +#define DCE11_DP_SEC 0x4ac3 + +static const struct dce110_timing_generator_offsets dce60_tg_offsets[] = { + { + .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmGRPH_CONTROL - mmGRPH_CONTROL), + .dmif = (mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL3 + - mmDPG_PIPE_ARBITRATION_CONTROL3), + }, + { + .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), + .dmif = (mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL3 + - mmDPG_PIPE_ARBITRATION_CONTROL3), + }, + { + .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), + .dmif = (mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL3 + - mmDPG_PIPE_ARBITRATION_CONTROL3), + }, + { + .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), + .dmif = (mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL3 + - mmDPG_PIPE_ARBITRATION_CONTROL3), + }, + { + .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), + .dmif = (mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL3 + - mmDPG_PIPE_ARBITRATION_CONTROL3), + }, + { + .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), + .dmif = (mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL3 + - mmDPG_PIPE_ARBITRATION_CONTROL3), + } +}; + +/* set register offset */ +#define SR(reg_name)\ + .reg_name = mm ## reg_name + +/* set register offset with instance */ +#define SRI(reg_name, block, id)\ + .reg_name = mm ## block ## id ## _ ## reg_name + +#define ipp_regs(id)\ +[id] = {\ + IPP_COMMON_REG_LIST_DCE_BASE(id)\ +} + +static const struct dce_ipp_registers ipp_regs[] = { + ipp_regs(0), + ipp_regs(1), + ipp_regs(2), + ipp_regs(3), + ipp_regs(4), + ipp_regs(5) +}; + +static const struct dce_ipp_shift ipp_shift = { + IPP_DCE60_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce_ipp_mask ipp_mask = { + IPP_DCE60_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +#define transform_regs(id)\ +[id] = {\ + XFM_COMMON_REG_LIST_DCE60(id)\ +} + +static const struct dce_transform_registers xfm_regs[] = { + transform_regs(0), + transform_regs(1), + transform_regs(2), + transform_regs(3), + transform_regs(4), + transform_regs(5) +}; + +static const struct dce_transform_shift xfm_shift = { + XFM_COMMON_MASK_SH_LIST_DCE60(__SHIFT) +}; + +static const struct dce_transform_mask xfm_mask = { + XFM_COMMON_MASK_SH_LIST_DCE60(_MASK) +}; + +#define aux_regs(id)\ +[id] = {\ + AUX_REG_LIST(id)\ +} + +static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4), + aux_regs(5) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4), + hpd_regs(5) +}; + +#define link_regs(id)\ +[id] = {\ + LE_DCE60_REG_LIST(id)\ +} + +static const struct dce110_link_enc_registers link_enc_regs[] = { + link_regs(0), + link_regs(1), + link_regs(2), + link_regs(3), + 
link_regs(4), + link_regs(5) +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_COMMON_REG_LIST_DCE_BASE(id),\ + .AFMT_CNTL = 0,\ +} + +static const struct dce110_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4), + stream_enc_regs(5) +}; + +static const struct dce_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT) +}; + +static const struct dce_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) +}; + +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCE_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_DCE_60_REG_LIST(id),\ +} + +static const struct dce_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3), + opp_regs(4), + opp_regs(5) +}; + +static const struct dce_opp_shift opp_shift = { + OPP_COMMON_MASK_SH_LIST_DCE_60(__SHIFT) +}; + +static const struct dce_opp_mask opp_mask = { + OPP_COMMON_MASK_SH_LIST_DCE_60(_MASK) +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCE10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE10_AUX_MASK_SH_LIST(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST(id), \ + .AUX_RESET_MASK = 0 \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4), + aux_engine_regs(5) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), +}; + +static const struct dce_audio_shift audio_shift = { + AUD_DCE60_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + AUD_DCE60_MASK_SH_LIST(_MASK) +}; + +#define clk_src_regs(id)\ +[id] = {\ + CS_COMMON_REG_LIST_DCE_80(id),\ +} + + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0), + clk_src_regs(1), + clk_src_regs(2) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, + .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 +}; + +static const struct resource_caps res_cap = { + .num_timing_generator = 6, + .num_audio = 6, + .num_stream_encoder = 6, + .num_pll = 2, + .num_ddc = 6, +}; + +static const struct resource_caps res_cap_61 = { + .num_timing_generator = 4, + .num_audio = 6, + .num_stream_encoder = 6, + .num_pll = 3, + .num_ddc = 6, +}; + +static const struct resource_caps res_cap_64 = { + .num_timing_generator = 2, + .num_audio = 2, + .num_stream_encoder = 2, + .num_pll = 2, + .num_ddc = 2, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCE_RGB, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = false + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 1, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 1, + .fp16 = 1 + 
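+	/* assumed x1000 convention for these caps: 16000 = 16x max upscale,
+	 * 250 = downscale to 1/4 of the source size */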
} +}; + +static const struct dce_dmcu_registers dmcu_regs = { + DMCU_DCE60_REG_LIST() +}; + +static const struct dce_dmcu_shift dmcu_shift = { + DMCU_MASK_SH_LIST_DCE60(__SHIFT) +}; + +static const struct dce_dmcu_mask dmcu_mask = { + DMCU_MASK_SH_LIST_DCE60(_MASK) +}; +static const struct dce_abm_registers abm_regs = { + ABM_DCE110_COMMON_REG_LIST() +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCE110(_MASK) +}; + +#define CTX ctx +#define REG(reg) mm ## reg + +#ifndef mmCC_DC_HDMI_STRAPS +#define mmCC_DC_HDMI_STRAPS 0x1918 +#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 +#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 +#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 +#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 +#endif + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + REG_GET_2(CC_DC_HDMI_STRAPS, + HDMI_DISABLE, &straps->hdmi_disable, + AUDIO_STREAM_NUMBER, &straps->audio_stream_number); + + REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); +} + +static struct audio *create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce60_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct timing_generator *dce60_timing_generator_create( + struct dc_context *ctx, + uint32_t instance, + const struct dce110_timing_generator_offsets *offsets) +{ + struct dce110_timing_generator *tg110 = + kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); + + if (!tg110) + return NULL; + + dce60_timing_generator_construct(tg110, ctx, instance, offsets); + return &tg110->base; +} + +static struct output_pixel_processor *dce60_opp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce110_opp *opp = + kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); + + if (!opp) + return NULL; + + dce60_opp_construct(opp, + ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +struct dce_aux *dce60_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +struct dce_i2c_hw *dce60_i2c_hw_create( + struct 
dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dce_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} + +struct dce_i2c_sw *dce60_i2c_sw_create( + struct dc_context *ctx) +{ + struct dce_i2c_sw *dce_i2c_sw = + kzalloc(sizeof(struct dce_i2c_sw), GFP_KERNEL); + + if (!dce_i2c_sw) + return NULL; + + dce_i2c_sw_construct(dce_i2c_sw, ctx); + + return dce_i2c_sw; +} +static struct stream_encoder *dce60_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dce110_stream_encoder *enc110 = + kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); + + if (!enc110) + return NULL; + + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + return &enc110->base; +} + +#define SRII(reg_name, block, id)\ + .reg_name[id] = mm ## block ## id ## _ ## reg_name + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCE6_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCE6_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCE6_MASK_SH_LIST(_MASK) +}; + +static struct dce_hwseq *dce60_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = create_audio, + .create_stream_encoder = dce60_stream_encoder_create, + .create_hwseq = dce60_hwseq_create, +}; + +#define mi_inst_regs(id) { \ + MI_DCE6_REG_LIST(id), \ + .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ +} +static const struct dce_mem_input_registers mi_regs[] = { + mi_inst_regs(0), + mi_inst_regs(1), + mi_inst_regs(2), + mi_inst_regs(3), + mi_inst_regs(4), + mi_inst_regs(5), +}; + +static const struct dce_mem_input_shift mi_shifts = { + MI_DCE6_MASK_SH_LIST(__SHIFT), + .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT +}; + +static const struct dce_mem_input_mask mi_masks = { + MI_DCE6_MASK_SH_LIST(_MASK), + .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK +}; + +static struct mem_input *dce60_mem_input_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), + GFP_KERNEL); + + if (!dce_mi) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dce60_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); + dce_mi->wa.single_head_rdreq_dmif_limit = 2; + return &dce_mi->base; +} + +static void dce60_transform_destroy(struct transform **xfm) +{ + kfree(TO_DCE_TRANSFORM(*xfm)); + *xfm = NULL; +} + +static struct transform *dce60_transform_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_transform *transform = + kzalloc(sizeof(struct dce_transform), GFP_KERNEL); + + if (!transform) + return NULL; + + dce60_transform_construct(transform, ctx, inst, + &xfm_regs[inst], &xfm_shift, &xfm_mask); + transform->prescaler_on = false; + return &transform->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 297000, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true +}; + +struct link_encoder 
*dce60_link_encoder_create( + const struct encoder_init_data *enc_init_data) +{ + struct dce110_link_encoder *enc110 = + kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc110) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dce60_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source]); + return &enc110->base; +} + +static struct panel_cntl *dce60_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + +struct clock_source *dce60_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dce110_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +void dce60_clock_source_destroy(struct clock_source **clk_src) +{ + kfree(TO_DCE110_CLK_SRC(*clk_src)); + *clk_src = NULL; +} + +static struct input_pixel_processor *dce60_ipp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); + + if (!ipp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dce60_ipp_construct(ipp, ctx, inst, + &ipp_regs[inst], &ipp_shift, &ipp_mask); + return &ipp->base; +} + +static void dce60_resource_destruct(struct dce110_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.opps[i] != NULL) + dce110_opp_destroy(&pool->base.opps[i]); + + if (pool->base.transforms[i] != NULL) + dce60_transform_destroy(&pool->base.transforms[i]); + + if (pool->base.ipps[i] != NULL) + dce_ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.mis[i] != NULL) { + kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); + pool->base.mis[i] = NULL; + } + + if (pool->base.timing_generators[i] != NULL) { + kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) + kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dce60_clock_source_destroy(&pool->base.clock_sources[i]); + } + } + + if (pool->base.abm != NULL) + dce_abm_destroy(&pool->base.abm); + + if (pool->base.dmcu != NULL) + dce_dmcu_destroy(&pool->base.dmcu); + + if (pool->base.dp_clock_source != NULL) + dce60_clock_source_destroy(&pool->base.dp_clock_source); + + for 
(i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i] != NULL) { + dce_aud_destroy(&pool->base.audios[i]); + } + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } +} + +bool dce60_validate_bandwidth( + struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + int i; + bool at_least_one_pipe = false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].stream) + at_least_one_pipe = true; + } + + if (at_least_one_pipe) { + /* TODO implement when needed but for now hardcode max value*/ + context->bw_ctx.bw.dce.dispclk_khz = 681000; + context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; + } else { + context->bw_ctx.bw.dce.dispclk_khz = 0; + context->bw_ctx.bw.dce.yclk_khz = 0; + } + + return true; +} + +static bool dce60_validate_surface_sets( + struct dc_state *context) +{ + int i; + + for (i = 0; i < context->stream_count; i++) { + if (context->stream_status[i].plane_count == 0) + continue; + + if (context->stream_status[i].plane_count > 1) + return false; + + if (context->stream_status[i].plane_states[0]->format + >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return false; + } + + return true; +} + +enum dc_status dce60_validate_global( + struct dc *dc, + struct dc_state *context) +{ + if (!dce60_validate_surface_sets(context)) + return DC_FAIL_SURFACE_VALIDATE; + + return DC_OK; +} + +static void dce60_destroy_resource_pool(struct resource_pool **pool) +{ + struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); + + dce60_resource_destruct(dce110_pool); + kfree(dce110_pool); + *pool = NULL; +} + +static const struct resource_funcs dce60_res_pool_funcs = { + .destroy = dce60_destroy_resource_pool, + .link_enc_create = dce60_link_encoder_create, + .panel_cntl_create = dce60_panel_cntl_create, + .validate_bandwidth = dce60_validate_bandwidth, + .validate_plane = dce100_validate_plane, + .add_stream_to_ctx = dce100_add_stream_to_ctx, + .validate_global = dce60_validate_global, + .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link +}; + +static bool dce60_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dce110_resource_pool *pool) +{ + unsigned int i; + struct dc_context *ctx = dc->ctx; + struct dc_bios *bp; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap; + pool->base.funcs = &dce60_res_pool_funcs; + + + /************************************************* + * Resource + asic cap harcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = res_cap.num_timing_generator; + pool->base.timing_generator_count = res_cap.num_timing_generator; + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 40; + dc->caps.max_cursor_size = 64; + dc->caps.dual_link_dvi = true; + dc->caps.extended_aux_timeout_support = false; + + /************************************************* + * Create resources * + *************************************************/ + + bp = ctx->dc_bios; + + if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { + pool->base.dp_clock_source = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + + pool->base.clock_sources[0] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); + pool->base.clock_sources[1] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + 
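+		/* firmware reports an external DP reference clock, so PLL0
+		 * and PLL1 both remain available as pixel clock sources */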
pool->base.clk_src_count = 2; + + } else { + pool->base.dp_clock_source = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); + + pool->base.clock_sources[0] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clk_src_count = 1; + } + + if (pool->base.dp_clock_source == NULL) { + dm_error("DC: failed to create dp clock source!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + } + + pool->base.dmcu = dce_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + { + struct irq_service_init_data init_data; + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dce60_create(&init_data); + if (!pool->base.irqs) + goto res_create_fail; + } + + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.timing_generators[i] = dce60_timing_generator_create( + ctx, i, &dce60_tg_offsets[i]); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto res_create_fail; + } + + pool->base.mis[i] = dce60_mem_input_create(ctx, i); + if (pool->base.mis[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create memory input!\n"); + goto res_create_fail; + } + + pool->base.ipps[i] = dce60_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create input pixel processor!\n"); + goto res_create_fail; + } + + pool->base.transforms[i] = dce60_transform_create(ctx, i); + if (pool->base.transforms[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create transform!\n"); + goto res_create_fail; + } + + pool->base.opps[i] = dce60_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create output pixel processor!\n"); + goto res_create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dce60_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } + pool->base.hw_i2cs[i] = dce60_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = dce60_i2c_sw_create(ctx); + if (pool->base.sw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create sw i2c!!\n"); + goto res_create_fail; + } + } + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->caps.disable_dp_clk_share = true; + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto res_create_fail; + + /* Create hardware sequencer */ + dce60_hw_sequencer_construct(dc); + + return true; + +res_create_fail: + dce60_resource_destruct(pool); + return false; +} + +struct resource_pool *dce60_create_resource_pool( + uint8_t num_virtual_links, + 
struct dc *dc) +{ + struct dce110_resource_pool *pool = + kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dce60_construct(num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + return NULL; +} + +static bool dce61_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dce110_resource_pool *pool) +{ + unsigned int i; + struct dc_context *ctx = dc->ctx; + struct dc_bios *bp; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_61; + pool->base.funcs = &dce60_res_pool_funcs; + + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = res_cap_61.num_timing_generator; + pool->base.timing_generator_count = res_cap_61.num_timing_generator; + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 40; + dc->caps.max_cursor_size = 64; + dc->caps.is_apu = true; + + /************************************************* + * Create resources * + *************************************************/ + + bp = ctx->dc_bios; + + if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { + pool->base.dp_clock_source = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + + pool->base.clock_sources[0] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); + pool->base.clock_sources[1] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[2] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 3; + + } else { + pool->base.dp_clock_source = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); + + pool->base.clock_sources[0] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[1] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 2; + } + + if (pool->base.dp_clock_source == NULL) { + dm_error("DC: failed to create dp clock source!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + } + + pool->base.dmcu = dce_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + { + struct irq_service_init_data init_data; + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dce60_create(&init_data); + if (!pool->base.irqs) + goto res_create_fail; + } + + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.timing_generators[i] = dce60_timing_generator_create( + ctx, i, &dce60_tg_offsets[i]); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto res_create_fail; + } + + pool->base.mis[i] = dce60_mem_input_create(ctx, i); + if (pool->base.mis[i] == NULL) { + BREAK_TO_DEBUGGER(); +
dm_error("DC: failed to create memory input!\n"); + goto res_create_fail; + } + + pool->base.ipps[i] = dce60_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create input pixel processor!\n"); + goto res_create_fail; + } + + pool->base.transforms[i] = dce60_transform_create(ctx, i); + if (pool->base.transforms[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create transform!\n"); + goto res_create_fail; + } + + pool->base.opps[i] = dce60_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create output pixel processor!\n"); + goto res_create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dce60_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } + pool->base.hw_i2cs[i] = dce60_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = dce60_i2c_sw_create(ctx); + if (pool->base.sw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create sw i2c!!\n"); + goto res_create_fail; + } + } + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->caps.disable_dp_clk_share = true; + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto res_create_fail; + + /* Create hardware sequencer */ + dce60_hw_sequencer_construct(dc); + + return true; + +res_create_fail: + dce60_resource_destruct(pool); + return false; +} + +struct resource_pool *dce61_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc) +{ + struct dce110_resource_pool *pool = + kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dce61_construct(num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + return NULL; +} + +static bool dce64_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dce110_resource_pool *pool) +{ + unsigned int i; + struct dc_context *ctx = dc->ctx; + struct dc_bios *bp; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_64; + pool->base.funcs = &dce60_res_pool_funcs; + + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = res_cap_64.num_timing_generator; + pool->base.timing_generator_count = res_cap_64.num_timing_generator; + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 40; + dc->caps.max_cursor_size = 64; + dc->caps.is_apu = true; + + /************************************************* + * Create resources * + *************************************************/ + + bp = ctx->dc_bios; + + if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { + pool->base.dp_clock_source = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + + pool->base.clock_sources[0] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false); + pool->base.clock_sources[1] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); + pool->base.clk_src_count = 2; + + } else { +
pool->base.dp_clock_source = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true); + + pool->base.clock_sources[0] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); + pool->base.clk_src_count = 1; + } + + if (pool->base.dp_clock_source == NULL) { + dm_error("DC: failed to create dp clock source!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + } + + pool->base.dmcu = dce_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + { + struct irq_service_init_data init_data; + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dce60_create(&init_data); + if (!pool->base.irqs) + goto res_create_fail; + } + + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.timing_generators[i] = dce60_timing_generator_create( + ctx, i, &dce60_tg_offsets[i]); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto res_create_fail; + } + + pool->base.mis[i] = dce60_mem_input_create(ctx, i); + if (pool->base.mis[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create memory input!\n"); + goto res_create_fail; + } + + pool->base.ipps[i] = dce60_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create input pixel processor!\n"); + goto res_create_fail; + } + + pool->base.transforms[i] = dce60_transform_create(ctx, i); + if (pool->base.transforms[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create transform!\n"); + goto res_create_fail; + } + + pool->base.opps[i] = dce60_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create output pixel processor!\n"); + goto res_create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dce60_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } + pool->base.hw_i2cs[i] = dce60_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = dce60_i2c_sw_create(ctx); + if (pool->base.sw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create sw i2c!!\n"); + goto res_create_fail; + } + } + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->caps.disable_dp_clk_share = true; + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto res_create_fail; + + /* Create hardware sequencer */ + dce60_hw_sequencer_construct(dc); + + return true; + +res_create_fail: + dce60_resource_destruct(pool); + return false; +} + +struct resource_pool *dce64_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc) +{ + struct 
dce110_resource_pool *pool = + kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dce64_construct(num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h new file mode 100644 index 0000000000000000000000000000000000000000..5d653a76b0b05eb42c0e576d8c2e6e0cbee1e6d2 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h @@ -0,0 +1,47 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_RESOURCE_DCE60_H__ +#define __DC_RESOURCE_DCE60_H__ + +#include "core_types.h" + +struct dc; +struct resource_pool; + +struct resource_pool *dce60_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc); + +struct resource_pool *dce61_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc); + +struct resource_pool *dce64_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc); + +#endif /* __DC_RESOURCE_DCE60_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c new file mode 100644 index 0000000000000000000000000000000000000000..fc1af0ff0ca4c5de361121f72cb545108cb31eba --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c @@ -0,0 +1,266 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" + +/* include DCE6 register header files */ +#include "dce/dce_6_0_d.h" +#include "dce/dce_6_0_sh_mask.h" + +#include "dc_types.h" + +#include "include/grph_object_id.h" +#include "include/logger_interface.h" +#include "../dce110/dce110_timing_generator.h" +#include "dce60_timing_generator.h" + +#include "timing_generator.h" + +enum black_color_format { + BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0, /* used as index in array */ + BLACK_COLOR_FORMAT_RGB_LIMITED, + BLACK_COLOR_FORMAT_YUV_TV, + BLACK_COLOR_FORMAT_YUV_CV, + BLACK_COLOR_FORMAT_YUV_SUPER_AA, + + BLACK_COLOR_FORMAT_COUNT +}; + +static const struct dce110_timing_generator_offsets reg_offsets[] = { +{ + .crtc = (mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), + .dcp = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), +}, +{ + .crtc = (mmCRTC1_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), + .dcp = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), +}, +{ + .crtc = (mmCRTC2_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), + .dcp = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), +}, +{ + .crtc = (mmCRTC3_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), + .dcp = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), +}, +{ + .crtc = (mmCRTC4_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), + .dcp = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), +}, +{ + .crtc = (mmCRTC5_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL), + .dcp = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL), +} +}; + +#define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10 + +#define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1) +#define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1) + +#define CRTC_REG(reg) (reg + tg110->offsets.crtc) +#define DCP_REG(reg) (reg + tg110->offsets.dcp) +#define DMIF_REG(reg) (reg + tg110->offsets.dmif) + +static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz) +{ + uint64_t pix_dur; + uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1 + + DCE110TG_FROM_TG(tg)->offsets.dmif; + uint32_t value = dm_read_reg(tg->ctx, addr); + + if (pix_clk_100hz == 0) + return; + + pix_dur = div_u64(10000000000ull, pix_clk_100hz); + + set_reg_field_value( + value, + pix_dur, + DPG_PIPE_ARBITRATION_CONTROL1, + PIXEL_DURATION); + + dm_write_reg(tg->ctx, addr, value); +} + +static void program_timing(struct timing_generator *tg, + const struct dc_crtc_timing *timing, + int vready_offset, + int vstartup_start, + int vupdate_offset, + int vupdate_width, + const enum signal_type signal, + bool use_vbios) +{ + if (!use_vbios) + program_pix_dur(tg, timing->pix_clk_100hz); + + dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, use_vbios); +} + +static void dce60_timing_generator_enable_advanced_request( + struct timing_generator *tg, + bool enable, + const struct dc_crtc_timing *timing) +{ + struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); + uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL); + uint32_t value = dm_read_reg(tg->ctx, addr); + /* DCE6 has CRTC_PREFETCH_EN bit in CRTC_CONTROL register */ + uint32_t addr2 = CRTC_REG(mmCRTC_CONTROL); + uint32_t value2 = dm_read_reg(tg->ctx, addr2); + + /* DCE6 does not support
CRTC_LEGACY_REQUESTOR_EN bit, + so it is not possible to set the bit based on the enable argument */ + + if ((timing->v_sync_width + timing->v_front_porch) <= 3) { + set_reg_field_value( + value, + 3, + CRTC_START_LINE_CONTROL, + CRTC_ADVANCED_START_LINE_POSITION); + set_reg_field_value( + value2, + 0, + CRTC_CONTROL, + CRTC_PREFETCH_EN); + } else { + set_reg_field_value( + value, + 4, + CRTC_START_LINE_CONTROL, + CRTC_ADVANCED_START_LINE_POSITION); + set_reg_field_value( + value2, + 1, + CRTC_CONTROL, + CRTC_PREFETCH_EN); + } + + set_reg_field_value( + value, + 1, + CRTC_START_LINE_CONTROL, + CRTC_PROGRESSIVE_START_LINE_EARLY); + + set_reg_field_value( + value, + 1, + CRTC_START_LINE_CONTROL, + CRTC_INTERLACE_START_LINE_EARLY); + + dm_write_reg(tg->ctx, addr, value); + dm_write_reg(tg->ctx, addr2, value2); +} + +static bool dce60_is_tg_enabled(struct timing_generator *tg) +{ + uint32_t addr = 0; + uint32_t value = 0; + uint32_t field = 0; + struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); + + addr = CRTC_REG(mmCRTC_CONTROL); + value = dm_read_reg(tg->ctx, addr); + field = get_reg_field_value(value, CRTC_CONTROL, + CRTC_CURRENT_MASTER_EN_STATE); + return field == 1; +} + +bool dce60_configure_crc(struct timing_generator *tg, + const struct crc_params *params) +{ + /* Cannot configure crc on a CRTC that is disabled */ + if (!dce60_is_tg_enabled(tg)) + return false; + + /* DCE6 has no CRTC_CRC_CNTL register, nothing to do */ + + return true; +} + +static const struct timing_generator_funcs dce60_tg_funcs = { + .validate_timing = dce110_tg_validate_timing, + .program_timing = program_timing, + .enable_crtc = dce110_timing_generator_enable_crtc, + .disable_crtc = dce110_timing_generator_disable_crtc, + .is_counter_moving = dce110_timing_generator_is_counter_moving, + .get_position = dce110_timing_generator_get_position, + .get_frame_count = dce110_timing_generator_get_vblank_counter, + .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos, + .set_early_control = dce110_timing_generator_set_early_control, + .wait_for_state = dce110_tg_wait_for_state, + .set_blank = dce110_tg_set_blank, + .is_blanked = dce110_tg_is_blanked, + .set_colors = dce110_tg_set_colors, + .set_overscan_blank_color = + dce110_timing_generator_set_overscan_color_black, + .set_blank_color = dce110_timing_generator_program_blank_color, + .disable_vga = dce110_timing_generator_disable_vga, + .did_triggered_reset_occur = + dce110_timing_generator_did_triggered_reset_occur, + .setup_global_swap_lock = + dce110_timing_generator_setup_global_swap_lock, + .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger, + .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger, + .tear_down_global_swap_lock = + dce110_timing_generator_tear_down_global_swap_lock, + .set_drr = dce110_timing_generator_set_drr, + .set_static_screen_control = + dce110_timing_generator_set_static_screen_control, + .set_test_pattern = dce110_timing_generator_set_test_pattern, + .arm_vert_intr = dce110_arm_vert_intr, + + /* DCE6.0 overrides */ + .enable_advanced_request = + dce60_timing_generator_enable_advanced_request, + .configure_crc = dce60_configure_crc, + .get_crc = dce110_get_crc, +}; + +void dce60_timing_generator_construct( + struct dce110_timing_generator *tg110, + struct dc_context *ctx, + uint32_t instance, + const struct dce110_timing_generator_offsets *offsets) +{ + tg110->controller_id = CONTROLLER_ID_D0 + instance; + tg110->base.inst = instance; + tg110->offsets = *offsets; + tg110->derived_offsets =
reg_offsets[instance]; + + tg110->base.funcs = &dce60_tg_funcs; + + tg110->base.ctx = ctx; + tg110->base.bp = ctx->dc_bios; + + tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1; + tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1; + + tg110->min_h_blank = 56; + tg110->min_h_front_porch = 4; + tg110->min_h_back_porch = 4; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.h new file mode 100644 index 0000000000000000000000000000000000000000..81d831233cc5e2715fefb5d4aa12fe272df35a62 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.h @@ -0,0 +1,39 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_TIMING_GENERATOR_DCE60_H__ +#define __DC_TIMING_GENERATOR_DCE60_H__ + +#include "timing_generator.h" +#include "../include/grph_object_id.h" + +/* DCE6.0 implementation inherits from DCE11.0 */ +void dce60_timing_generator_construct( + struct dce110_timing_generator *tg, + struct dc_context *ctx, + uint32_t instance, + const struct dce110_timing_generator_offsets *offsets); + +#endif /* __DC_TIMING_GENERATOR_DCE60_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 62ad1a11bff9c5623300936bf8f5742f6c814ceb..733e6e6e43bd65529162f1201cd434591ebbb493 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -31,4 +31,11 @@ DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \ AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10)) +# fix: +# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types +# aarch64 does not support soft-float, so use hard-float and handle this in code +ifdef CONFIG_ARM64 +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn10/dcn10_resource.o := -mgeneral-regs-only +endif + AMD_DISPLAY_FILES += $(AMD_DAL_DCN10) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a643927e272bdae2eeae61eede89a70087677d5e..8ca94f5061957a2843ec8f16eade8b90cb71f9a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1450,33 +1450,42 @@ void dcn10_init_hw(struct dc *dc) void dcn10_power_down_on_boot(struct dc *dc) { int i = 0; + struct dc_link *edp_link; - if (dc->config.power_down_display_on_boot) { - struct dc_link *edp_link = get_edp_link(dc); - - if (edp_link && - edp_link->link_enc->funcs->is_dig_enabled && - edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && - dc->hwseq->funcs.edp_backlight_control && - dc->hwss.power_down && - dc->hwss.edp_power_control) { - dc->hwseq->funcs.edp_backlight_control(edp_link, false); - dc->hwss.power_down(dc); - dc->hwss.edp_power_control(edp_link, false); - } else { - for (i = 0; i < dc->link_count; i++) { - struct dc_link *link = dc->links[i]; - - if (link->link_enc->funcs->is_dig_enabled && - link->link_enc->funcs->is_dig_enabled(link->link_enc) && - dc->hwss.power_down) { - dc->hwss.power_down(dc); - break; - } + if (!dc->config.power_down_display_on_boot) + return; + + edp_link = get_edp_link(dc); + if (edp_link && + edp_link->link_enc->funcs->is_dig_enabled && + edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && + dc->hwseq->funcs.edp_backlight_control && + dc->hwss.power_down && + dc->hwss.edp_power_control) { + dc->hwseq->funcs.edp_backlight_control(edp_link, false); + dc->hwss.power_down(dc); + dc->hwss.edp_power_control(edp_link, false); + } else { + for (i = 0; i < dc->link_count; i++) { + struct dc_link *link = dc->links[i]; + if (link->link_enc->funcs->is_dig_enabled && + link->link_enc->funcs->is_dig_enabled(link->link_enc) && + dc->hwss.power_down) { + dc->hwss.power_down(dc); + break; } + } } + + /* + * Call update_clocks with empty context + * to send DISPLAY_OFF + * Otherwise DISPLAY_OFF may not be asserted + */ + if (dc->clk_mgr->funcs->set_low_power_state) + dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr); } void dcn10_reset_hw_ctx_wrap( @@ -2368,14 +2377,6 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) 
&blnd_cfg.black_color); } - /* - * The way 420 is packed, 2 channels carry Y component, 1 channel - * alternate between Cb and Cr, so both channels need the pixel - * value for Y - */ - if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) - blnd_cfg.black_color.color_r_cr = blnd_cfg.black_color.color_g_y; - if (per_pixel_alpha) blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; else @@ -2760,6 +2761,152 @@ static struct pipe_ctx *dcn10_find_top_pipe_for_stream( return NULL; } +bool dcn10_disconnect_pipes( + struct dc *dc, + struct dc_state *context) +{ + bool found_stream = false; + int i, j; + struct dce_hwseq *hws = dc->hwseq; + struct dc_state *old_ctx = dc->current_state; + bool mpcc_disconnected = false; + struct pipe_ctx *old_pipe; + struct pipe_ctx *new_pipe; + DC_LOGGER_INIT(dc->ctx->logger); + + /* Set pipe update flags and lock pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + new_pipe = &context->res_ctx.pipe_ctx[i]; + new_pipe->update_flags.raw = 0; + + if (!old_pipe->plane_state && !new_pipe->plane_state) + continue; + + if (old_pipe->plane_state && !new_pipe->plane_state) + new_pipe->update_flags.bits.disable = 1; + + /* Check for scl update */ + if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data))) + new_pipe->update_flags.bits.scaler = 1; + + /* Check for vp update */ + if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect)) + || memcmp(&old_pipe->plane_res.scl_data.viewport_c, + &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect))) + new_pipe->update_flags.bits.viewport = 1; + + } + + if (!IS_DIAG_DC(dc->ctx->dce_environment)) { + /* Disconnect mpcc here only if losing pipe split */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && + old_ctx->res_ctx.pipe_ctx[i].top_pipe) { + + /* Find the top pipe in the new ctx for the bottom pipe that we + * want to remove by comparing the streams. If both pipes are being + * disabled then do it in the regular pipe programming sequence + */ + for (j = 0; j < dc->res_pool->pipe_count; j++) { + if (old_ctx->res_ctx.pipe_ctx[i].top_pipe->stream == context->res_ctx.pipe_ctx[j].stream && + !context->res_ctx.pipe_ctx[j].top_pipe && + !context->res_ctx.pipe_ctx[j].update_flags.bits.disable) { + found_stream = true; + break; + } + } + + // Disconnect if the top pipe lost its pipe split + if (found_stream && !context->res_ctx.pipe_ctx[j].bottom_pipe) { + hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); + mpcc_disconnected = true; + } + } + found_stream = false; + } + } + + if (mpcc_disconnected) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + struct hubp *hubp = pipe_ctx->plane_res.hubp; + + if (!pipe_ctx || !plane_state || !pipe_ctx->stream) + continue; + + // Only update scaler and viewport here if we lose a pipe split. + // This is to prevent half the screen from being black when we + // unlock after disconnecting MPCC.
+ if (!(old_pipe && !pipe_ctx->top_pipe && + !pipe_ctx->bottom_pipe && old_pipe->bottom_pipe)) + continue; + + if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) { + if (pipe_ctx->update_flags.bits.scaler || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.position_change || + plane_state->update_flags.bits.per_pixel_alpha_change || + pipe_ctx->stream->update_flags.bits.scaling) { + + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; + ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP); + /* scaler configuration */ + pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler( + pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); + } + + if (pipe_ctx->update_flags.bits.viewport || + (context == dc->current_state && plane_state->update_flags.bits.position_change) || + (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || + (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { + + hubp->funcs->mem_program_viewport( + hubp, + &pipe_ctx->plane_res.scl_data.viewport, + &pipe_ctx->plane_res.scl_data.viewport_c); + } + } + } + } + return mpcc_disconnected; +} + +void dcn10_wait_for_pending_cleared(struct dc *dc, + struct dc_state *context) +{ + struct pipe_ctx *pipe_ctx; + struct timing_generator *tg; + int i; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe_ctx = &context->res_ctx.pipe_ctx[i]; + tg = pipe_ctx->stream_res.tg; + + /* + * Only wait for the top pipe's tg pending bit. + * Also skip if pipe is disabled. + */ + if (pipe_ctx->top_pipe || + !pipe_ctx->stream || !pipe_ctx->plane_state || + !tg->funcs->is_tg_enabled(tg)) + continue; + + /* + * Wait for VBLANK then VACTIVE to ensure we get VUPDATE. + * For some reason waiting for OTG_UPDATE_PENDING cleared + * seems to not trigger the update right away, and if we + * lock again before VUPDATE then we don't get a separated + * operation.
+ */ + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + } +} + void dcn10_apply_ctx_for_surface( struct dc *dc, const struct dc_stream_state *stream, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 6d891166da8a4839e651e015dd422665fc562502..e5691e49902315f0165dc017ab68ff51b781c067 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -194,6 +194,12 @@ void dcn10_get_surface_visual_confirm_color( void dcn10_get_hdr_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); +bool dcn10_disconnect_pipes( + struct dc *dc, + struct dc_state *context); + +void dcn10_wait_for_pending_cleared(struct dc *dc, + struct dc_state *context); void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx); void dcn10_verify_allow_pstate_change_high(struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c index 5c98b71c1d47ab3715301b00e2c5e3293cd12fe4..a1d1559bb5d73003e7cf12b5370aa86971b89a10 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -34,6 +34,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, .post_unlock_program_front_end = dcn10_post_unlock_program_front_end, + .disconnect_pipes = dcn10_disconnect_pipes, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, .update_plane_addr = dcn10_update_plane_addr, .update_dchub = dcn10_update_dchub, .update_pending_status = dcn10_update_pending_status, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 17d5cb422025e13dfb237d2ef6979364c2b10cde..1abd81e17f09def2d3e63de556446c0929ea834f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1213,6 +1213,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont bool video_large = false; bool desktop_large = false; bool dcc_disabled = false; + bool mpo_enabled = false; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) @@ -1221,6 +1222,9 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont if (context->stream_status[i].plane_count > 2) return DC_FAIL_UNSUPPORTED_1; + if (context->stream_status[i].plane_count > 1) + mpo_enabled = true; + for (j = 0; j < context->stream_status[i].plane_count; j++) { struct dc_plane_state *plane = context->stream_status[i].plane_states[j]; @@ -1244,6 +1248,10 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont } } + /* Disable MPO in multi-display configurations. */ + if (context->stream_count > 1 && mpo_enabled) + return DC_FAIL_UNSUPPORTED_1; + /* * Workaround: On DCN10 there is UMC issue that causes underflow when * playing 4k video on 4k desktop with video downscaled and single channel @@ -1331,6 +1339,47 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx) return value; } +/* + * Some architectures don't support soft-float (e.g. 
aarch64); on those, + * this function has to be called with hard-float enabled. Make sure not + * to inline it so whatever FP work is done stays inside + */ +static noinline void dcn10_resource_construct_fp( + struct dc *dc) +{ + if (dc->ctx->dce_version == DCN_VERSION_1_01) { + struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc; + struct dcn_ip_params *dcn_ip = dc->dcn_ip; + struct display_mode_lib *dml = &dc->dml; + + dml->ip.max_num_dpp = 3; + /* TODO how to handle 23.84? */ + dcn_soc->dram_clock_change_latency = 23; + dcn_ip->max_num_dpp = 3; + } + if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { + dc->dcn_soc->urgent_latency = 3; + dc->debug.disable_dmcu = true; + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f; + } + + + dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width; + ASSERT(dc->dcn_soc->number_of_channels < 3); + if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/ + dc->dcn_soc->number_of_channels = 2; + + if (dc->dcn_soc->number_of_channels == 1) { + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f; + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f; + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f; + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f; + if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f; + } + } +} + static bool dcn10_resource_construct( uint8_t num_virtual_links, struct dc *dc, @@ -1482,37 +1531,15 @@ static bool dcn10_resource_construct( memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults)); memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults)); - if (dc->ctx->dce_version == DCN_VERSION_1_01) { - struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc; - struct dcn_ip_params *dcn_ip = dc->dcn_ip; - struct display_mode_lib *dml = &dc->dml; - - dml->ip.max_num_dpp = 3; - /* TODO how to handle 23.84?
*/ - dcn_soc->dram_clock_change_latency = 23; - dcn_ip->max_num_dpp = 3; - } - if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { - dc->dcn_soc->urgent_latency = 3; - dc->debug.disable_dmcu = true; - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f; - } - - - dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width; - ASSERT(dc->dcn_soc->number_of_channels < 3); - if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/ - dc->dcn_soc->number_of_channels = 2; - - if (dc->dcn_soc->number_of_channels == 1) { - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f; - dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f; - dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f; - dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f; - if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f; - } - } +#if defined(CONFIG_ARM64) + /* Aarch64 does not support -msoft-float/-mfloat-abi=soft */ + DC_FP_START(); + dcn10_resource_construct_fp(dc); + DC_FP_END(); +#else + /* Other architectures we build for build this with soft-float */ + dcn10_resource_construct_fp(dc); +#endif pool->base.pp_smu = dcn10_pp_smu_create(ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 07b2f9399671dbca108cbb021d8c7cc988290739..842abb4c475bceddf05e48214bd6b611b3386efa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -121,35 +121,35 @@ void enc1_update_generic_info_packet( switch (packet_index) { case 0: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC0_FRAME_UPDATE, 1); + AFMT_GENERIC0_IMMEDIATE_UPDATE, 1); break; case 1: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC1_FRAME_UPDATE, 1); + AFMT_GENERIC1_IMMEDIATE_UPDATE, 1); break; case 2: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC2_FRAME_UPDATE, 1); + AFMT_GENERIC2_IMMEDIATE_UPDATE, 1); break; case 3: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC3_FRAME_UPDATE, 1); + AFMT_GENERIC3_IMMEDIATE_UPDATE, 1); break; case 4: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC4_FRAME_UPDATE, 1); + AFMT_GENERIC4_IMMEDIATE_UPDATE, 1); break; case 5: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC5_FRAME_UPDATE, 1); + AFMT_GENERIC5_IMMEDIATE_UPDATE, 1); break; case 6: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC6_FRAME_UPDATE, 1); + AFMT_GENERIC6_IMMEDIATE_UPDATE, 1); break; case 7: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC7_FRAME_UPDATE, 1); + AFMT_GENERIC7_IMMEDIATE_UPDATE, 1); break; default: break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index ed385b1477be77041c8a3655afc86e5b4449c7b3..30eae7459d5090ab2f6db5a938b872745bdeb4f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -281,7 +281,14 @@ struct dcn10_stream_enc_registers { SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, 
AFMT_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\ @@ -345,7 +352,14 @@ struct dcn10_stream_enc_registers { type AFMT_GENERIC2_FRAME_UPDATE;\ type AFMT_GENERIC3_FRAME_UPDATE;\ type AFMT_GENERIC4_FRAME_UPDATE;\ + type AFMT_GENERIC0_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC1_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC2_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC3_IMMEDIATE_UPDATE;\ type AFMT_GENERIC4_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC5_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC6_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC7_IMMEDIATE_UPDATE;\ type AFMT_GENERIC5_FRAME_UPDATE;\ type AFMT_GENERIC6_FRAME_UPDATE;\ type AFMT_GENERIC7_FRAME_UPDATE;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index 5fcaf78334ff9a96a24948422733a310bbe08fd2..624cb1341ef1450de587f1bdf849e45c1aaa7eca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -17,6 +17,10 @@ ifdef CONFIG_PPC64 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec endif +ifdef CONFIG_ARM64 +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mgeneral-regs-only +endif + ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h index 667640c4b288a1c7cd5857f4651ba2833e01d7f6..1118e33aaa2cc96c5f0dddab2834b066507a4591 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h @@ -94,6 +94,7 @@ DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \ DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \ DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \ + DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \ DSC_SF(DSCC0_DSCC_CONFIG0, ICH_RESET_AT_END_OF_LINE, mask_sh), \ DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \ DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 66180b4332f1d6b410d80602e5c606dfe8f1a866..c8cfd3ba1c1567155f179172a447d2c887a0fb21 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1457,8 +1457,8 @@ static void dcn20_update_dchubp_dpp( /* Any updates are handled in dc interface, just need to apply existing for plane enable */ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || - pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport) - && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + pipe_ctx->update_flags.bits.scaler || viewport_changed == true) && + pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { dc->hwss.set_cursor_position(pipe_ctx); 
dc->hwss.set_cursor_attribute(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 3dde6f26de47426796156c937a5fa1775c5d519c..966e1790b9bfde07c5b276ce8540ec2c6dd4201f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -34,6 +34,8 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .disconnect_pipes = dcn10_disconnect_pipes, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, .update_plane_addr = dcn20_update_plane_addr, .update_dchub = dcn10_update_dchub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index bf0044f7417ecd79e00b0f4cfad6030f5f2cec09..864acd695cbbf90287079af69f1ab15d63e3643d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -167,7 +167,9 @@ LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\ LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\ LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\ - LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh) + LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh) #define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\ LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\ @@ -229,8 +231,6 @@ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \ SRI(DPCSTX_TX_CNTL, DPCSTX, id), \ - SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \ - SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \ SR(RDPCSTX0_RDPCSTX_SCRATCH) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 790baf5526959aa845cbd72847f1f87ee4191d38..1b98744451345272648c4548558a15bcfa216837 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -3141,7 +3141,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co int vlevel = 0; int pipe_split_from[MAX_PIPES]; int pipe_cnt = 0; - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); DC_LOGGER_INIT(dc->ctx->logger); BW_VAL_TRACE_COUNT(); @@ -3209,6 +3209,9 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc, context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive = dc->debug.enable_dram_clock_change_one_display_vactive; + /*Unsafe due to current pipe merge and split logic*/ + ASSERT(context != dc->current_state); + if (fast_validate) { return dcn20_validate_bandwidth_internal(dc, context, true); } @@ -3320,7 +3323,7 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat return DC_OK; } -static struct resource_funcs dcn20_res_pool_funcs = { +static const struct resource_funcs dcn20_res_pool_funcs = { .destroy = dcn20_destroy_resource_pool, .link_enc_create = dcn20_link_encoder_create, .panel_cntl_create = dcn20_panel_cntl_create, diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index 2c1959845c29a424d41dc47d662e6188dbc89ed1..cdd39ee9761d5f211272edd6aaea3bb6e3b176ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -95,7 +95,6 @@ struct display_stream_compressor *dcn20_dsc_create( struct dc_context *ctx, uint32_t inst); void dcn20_dsc_destroy(struct display_stream_compressor **dsc); -void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb); void dcn20_cap_soc_clocks( struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table max_clocks); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index 07684d3e375abdceafeb2c43c82259f35a61fb32..51a2f3d4c194b193160f3a98ddf593d4f4b431fe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -13,6 +13,10 @@ ifdef CONFIG_PPC64 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec endif +ifdef CONFIG_ARM64 +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mgeneral-regs-only +endif + ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index b187f71afa652535af945638e34d3b64ca8751fd..2ba880c3943c3d48c2aa2f5d1f74a47ba7318c7c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -35,6 +35,8 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .disconnect_pipes = dcn10_disconnect_pipes, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, .update_plane_addr = dcn20_update_plane_addr, .update_dchub = dcn10_update_dchub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 88d41a385add8361dcf995701a17c5a59bc8fec7..78743ae37851f7f8725e8f48f6d1343613c0e216 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1184,6 +1184,9 @@ bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, BW_VAL_TRACE_COUNT(); + /*Unsafe due to current pipe merge and split logic*/ + ASSERT(context != dc->current_state); + out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel); if (pipe_cnt == 0) @@ -1754,7 +1757,7 @@ enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_stat return result; } -static struct resource_funcs dcn21_res_pool_funcs = { +static const struct resource_funcs dcn21_res_pool_funcs = { .destroy = dcn21_destroy_resource_pool, .link_enc_create = dcn21_link_encoder_create, .panel_cntl_create = dcn21_panel_cntl_create, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h index 8e9fd59ccde82d05c06b66c65ee56253ec91a407..2fbf879cd327e13b06bf8f5875857e4ff98700ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h @@ -61,7 +61,10 @@ DPCS_DCN2_MASK_SH_LIST(mask_sh),\ 
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT_18_BIT, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\ - LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh) + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh) + void dcn30_link_encoder_construct( struct dcn20_link_encoder *enc20, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 9afee71604902d9a26e9af5b528031cd77a1a4a0..19daa456e3bfe752cc142912412f3ac7d4bde25c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -35,6 +35,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .disconnect_pipes = dcn10_disconnect_pipes, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, .update_plane_addr = dcn20_update_plane_addr, .update_dchub = dcn10_update_dchub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 653a571e366d1b03369f060f66341ae07f8f0611..8be4f21169d07a66bb67a3fe71daf0ac69015859 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -340,7 +340,7 @@ static const struct dce110_clk_src_mask cs_mask = { #define abm_regs(id)\ [id] = {\ - ABM_DCN301_REG_LIST(id)\ + ABM_DCN30_REG_LIST(id)\ } static const struct dce_abm_registers abm_regs[] = { @@ -491,6 +491,8 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { [id] = {\ LE_DCN3_REG_LIST(id), \ UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN2_REG_LIST(id), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dce110_aux_registers_shift aux_shift = { @@ -871,7 +873,7 @@ void dcn30_dpp_destroy(struct dpp **dpp) *dpp = NULL; } -struct dpp *dcn30_dpp_create( +static struct dpp *dcn30_dpp_create( struct dc_context *ctx, uint32_t inst) { @@ -889,7 +891,8 @@ struct dpp *dcn30_dpp_create( kfree(dpp); return NULL; } -struct output_pixel_processor *dcn30_opp_create( + +static struct output_pixel_processor *dcn30_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_opp *opp = @@ -905,7 +908,7 @@ struct output_pixel_processor *dcn30_opp_create( return &opp->base; } -struct dce_aux *dcn30_aux_engine_create( +static struct dce_aux *dcn30_aux_engine_create( struct dc_context *ctx, uint32_t inst) { @@ -924,6 +927,7 @@ struct dce_aux *dcn30_aux_engine_create( return &aux_engine->base; } + #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } static const struct dce_i2c_registers i2c_hw_regs[] = { @@ -943,7 +947,7 @@ static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) }; -struct dce_i2c_hw *dcn30_i2c_hw_create( +static struct dce_i2c_hw *dcn30_i2c_hw_create( struct dc_context *ctx, uint32_t inst) { @@ -958,6 +962,7 @@ struct dce_i2c_hw *dcn30_i2c_hw_create( return dce_i2c_hw; } + static struct mpc *dcn30_mpc_create( struct dc_context *ctx, int num_mpcc, @@ -1008,7 +1013,7 @@ struct hubbub *dcn30_hubbub_create(struct dc_context *ctx) return &hubbub3->base; } -struct timing_generator *dcn30_timing_generator_create( +static struct 
timing_generator *dcn30_timing_generator_create( struct dc_context *ctx, uint32_t instance) { @@ -1042,7 +1047,7 @@ static const struct encoder_feature_support link_enc_feature = { .flags.bits.IS_TPS4_CAPABLE = true }; -struct link_encoder *dcn30_link_encoder_create( +static struct link_encoder *dcn30_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = @@ -1063,7 +1068,7 @@ struct link_encoder *dcn30_link_encoder_create( return &enc20->enc10.base; } -struct panel_cntl *dcn30_panel_cntl_create(const struct panel_cntl_init_data *init_data) +static struct panel_cntl *dcn30_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); @@ -1311,7 +1316,7 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool) dcn_dccg_destroy(&pool->base.dccg); } -struct hubp *dcn30_hubp_create( +static struct hubp *dcn30_hubp_create( struct dc_context *ctx, uint32_t inst) { @@ -1330,7 +1335,7 @@ struct hubp *dcn30_hubp_create( return NULL; } -bool dcn30_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +static bool dcn30_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; @@ -1355,7 +1360,7 @@ bool dcn30_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) return true; } -bool dcn30_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +static bool dcn30_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; @@ -2292,7 +2297,7 @@ static void get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, (dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100)); } -static void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { unsigned int i, j; unsigned int num_states = 0; @@ -2412,7 +2417,7 @@ static void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30); } -static struct resource_funcs dcn30_res_pool_funcs = { +static const struct resource_funcs dcn30_res_pool_funcs = { .destroy = dcn30_destroy_resource_pool, .link_enc_create = dcn30_link_encoder_create, .panel_cntl_create = dcn30_panel_cntl_create, @@ -2420,6 +2425,7 @@ static struct resource_funcs dcn30_res_pool_funcs = { .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, .set_mcif_arb_params = dcn30_set_mcif_arb_params, @@ -2683,7 +2689,7 @@ static bool dcn30_resource_construct( if (!resource_construct(num_virtual_links, dc, &pool->base, (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? 
&res_create_funcs : &res_create_maximus_funcs))) - goto create_fail; + goto create_fail; /* HW Sequencer and Plane caps */ dcn30_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h index 4b4a4d81c1e3b62c816cb749a610917ee561c31a..c9d5f94092a04001f9711800e7c52586e76cc8ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h @@ -79,4 +79,7 @@ enum dc_status dcn30_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); + +void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); + #endif /* _DCN30_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 417331438c306195914873dd653072efae1ed989..dbc7e2abe3795b4885bc78424e3d7d44c8503313 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -33,6 +33,10 @@ ifdef CONFIG_PPC64 dml_ccflags := -mhard-float -maltivec endif +ifdef CONFIG_ARM64 +dml_rcflags := -mgeneral-regs-only +endif + ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 @@ -60,6 +64,13 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_rcflags) endif ifdef CONFIG_DRM_AMD_DC_DCN3_0 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) -Wframe-larger-than=2048 @@ -67,6 +78,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) endif CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_rcflags) DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index b54814f11b74b76ed5b5b4125ac2a58dc4a4d2bc..2beb284f89b0a914e8980ec049979e403c4efee7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -63,6 +63,7 @@ typedef struct { #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN30_MAX_DSC_IMAGE_WIDTH 5184 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib); static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( @@ -3984,6 +3985,9 @@ void 
dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) { v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1; + } else if (v->DSCEnabled[k] && (v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH)) { + v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; + v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1; } else { v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index afdd4f0d9d718bebbfcde08ab93b57268b91bf08..b320931360893809823ea244b22367dd05c06f89 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -467,7 +467,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] = 1; mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0; - mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;; + mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable; mode_lib->vba.DSCEnable[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable; mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_slices; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index ea29cf95d470b874eb214391865ff2ead9b3f0e0..f2624a1156e5c8cea21ede1e8694d9c20ded3783 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -10,6 +10,10 @@ ifdef CONFIG_PPC64 dsc_ccflags := -mhard-float -maltivec endif +ifdef CONFIG_ARM64 +dsc_rcflags := -mgeneral-regs-only +endif + ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 @@ -28,6 +32,7 @@ endif endif CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_rcflags) DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 8cdaa6eef5d370d18d5de77c07575010bcddcdf7..4c844cfaa95684581a622bba72e76003e2d7d492 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -34,6 +34,9 @@ /* default DSC policy target bitrate limit is 16bpp */ static uint32_t dsc_policy_max_target_bpp_limit = 16; +/* default DSC policy enables DSC only when needed */ +static bool dsc_policy_enable_dsc_when_not_needed; + static uint32_t dc_dsc_bandwidth_in_kbps_from_timing( const struct dc_crtc_timing *timing) { @@ -189,8 +192,10 @@ static bool dsc_throughput_from_dpcd(int dpcd_throughput, int *throughput) } -static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bpp_increment_div) +static bool dsc_bpp_increment_div_from_dpcd(uint8_t bpp_increment_dpcd, uint32_t *bpp_increment_div) { + // Mask bpp increment dpcd field to avoid reading other fields + bpp_increment_dpcd &= 0x7; switch (bpp_increment_dpcd) { case 0: @@ -360,7 +365,7 @@ static bool decide_dsc_target_bpp_x16( get_dsc_bandwidth_range(policy->min_target_bpp, policy->max_target_bpp, dsc_common_caps, timing, &range); - if (target_bandwidth_kbps >= range.stream_kbps) { + if 
(!policy->enable_dsc_when_not_needed && target_bandwidth_kbps >= range.stream_kbps) { /* enough bandwidth without dsc */ *target_bpp_x16 = 0; should_use_dsc = false; @@ -961,9 +966,20 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, struct dc /* internal upper limit, default 16 bpp */ if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit) policy->max_target_bpp = dsc_policy_max_target_bpp_limit; + + /* enable DSC when not needed, default false */ + if (dsc_policy_enable_dsc_when_not_needed) + policy->enable_dsc_when_not_needed = dsc_policy_enable_dsc_when_not_needed; + else + policy->enable_dsc_when_not_needed = false; } void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit) { dsc_policy_max_target_bpp_limit = limit; } + +void dc_dsc_policy_set_enable_dsc_when_not_needed(bool enable) +{ + dsc_policy_enable_dsc_when_not_needed = enable; +} diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile index 0f2f4508e5646e1895de602184ac2a0b122f22e8..74c0943ed6441e3f118516833e0ee77004d1f93c 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile +++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile @@ -30,6 +30,18 @@ AMD_DAL_GPIO = $(addprefix $(AMDDALPATH)/dc/gpio/,$(GPIO)) AMD_DISPLAY_FILES += $(AMD_DAL_GPIO) +############################################################################### +# DCE 6x +############################################################################### +# all DCE6.x are derived from DCE6.0 +ifdef CONFIG_DRM_AMD_DC_SI +GPIO_DCE60 = hw_translate_dce60.o hw_factory_dce60.o + +AMD_DAL_GPIO_DCE60 = $(addprefix $(AMDDALPATH)/dc/gpio/dce60/,$(GPIO_DCE60)) + +AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE60) +endif + ############################################################################### # DCE 8x ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.c b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.c new file mode 100644 index 0000000000000000000000000000000000000000..cc69acd8ada7cef586db2fd9517682b93fb60db7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.c @@ -0,0 +1,175 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "include/gpio_types.h" +#include "../hw_factory.h" + +#include "hw_factory_dce60.h" + +#include "../hw_gpio.h" +#include "../hw_ddc.h" +#include "../hw_hpd.h" +#include "../hw_generic.h" + +#include "dce/dce_6_0_d.h" +#include "dce/dce_6_0_sh_mask.h" + + +#define REG(reg_name)\ + mm ## reg_name + +#include "reg_helper.h" +#include "../hpd_regs.h" + +#define HPD_REG_LIST_DCE6(id) \ + HPD_GPIO_REG_LIST(id), \ + .int_status = mmDC_HPD ## id ## _INT_STATUS,\ + .toggle_filt_cntl = mmDC_HPD ## id ## _TOGGLE_FILT_CNTL + +#define HPD_MASK_SH_LIST_DCE6(mask_sh) \ + .DC_HPD_SENSE_DELAYED = DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED ## mask_sh,\ + .DC_HPD_SENSE = DC_HPD1_INT_STATUS__DC_HPD1_SENSE ## mask_sh,\ + .DC_HPD_CONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_CONNECT_INT_DELAY ## mask_sh,\ + .DC_HPD_DISCONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_DISCONNECT_INT_DELAY ## mask_sh + +#define hpd_regs(id) \ +{\ + HPD_REG_LIST_DCE6(id)\ +} + +static const struct hpd_registers hpd_regs[] = { + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4), + hpd_regs(5), + hpd_regs(6) +}; + +static const struct hpd_sh_mask hpd_shift = { + HPD_MASK_SH_LIST_DCE6(__SHIFT) +}; + +static const struct hpd_sh_mask hpd_mask = { + HPD_MASK_SH_LIST_DCE6(_MASK) +}; + +#include "../ddc_regs.h" + + /* set field name */ +#define SF_DDC(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +static const struct ddc_registers ddc_data_regs[] = { + ddc_data_regs(1), + ddc_data_regs(2), + ddc_data_regs(3), + ddc_data_regs(4), + ddc_data_regs(5), + ddc_data_regs(6), + ddc_vga_data_regs, + ddc_i2c_data_regs +}; + +static const struct ddc_registers ddc_clk_regs[] = { + ddc_clk_regs(1), + ddc_clk_regs(2), + ddc_clk_regs(3), + ddc_clk_regs(4), + ddc_clk_regs(5), + ddc_clk_regs(6), + ddc_vga_clk_regs, + ddc_i2c_clk_regs +}; + +static const struct ddc_sh_mask ddc_shift = { + DDC_MASK_SH_LIST(__SHIFT) +}; + +static const struct ddc_sh_mask ddc_mask = { + DDC_MASK_SH_LIST(_MASK) +}; + +static void define_ddc_registers( + struct hw_gpio_pin *pin, + uint32_t en) +{ + struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin); + + switch (pin->id) { + case GPIO_ID_DDC_DATA: + ddc->regs = &ddc_data_regs[en]; + ddc->base.regs = &ddc_data_regs[en].gpio; + break; + case GPIO_ID_DDC_CLOCK: + ddc->regs = &ddc_clk_regs[en]; + ddc->base.regs = &ddc_clk_regs[en].gpio; + break; + default: + ASSERT_CRITICAL(false); + return; + } + + ddc->shifts = &ddc_shift; + ddc->masks = &ddc_mask; + +} + +static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en) +{ + struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin); + + hpd->regs = &hpd_regs[en]; + hpd->shifts = &hpd_shift; + hpd->masks = &hpd_mask; + hpd->base.regs = &hpd_regs[en].gpio; +} + +static const struct hw_factory_funcs funcs = { + .init_ddc_data = dal_hw_ddc_init, + .init_generic = NULL, + .init_hpd = dal_hw_hpd_init, + .get_ddc_pin = dal_hw_ddc_get_pin, + .get_hpd_pin = dal_hw_hpd_get_pin, + .get_generic_pin = NULL, + .define_hpd_registers = define_hpd_registers, + .define_ddc_registers = define_ddc_registers +}; + +void dal_hw_factory_dce60_init( + struct hw_factory *factory) +{ + factory->number_of_pins[GPIO_ID_DDC_DATA] = 8; + factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8; + factory->number_of_pins[GPIO_ID_GENERIC] = 7; + factory->number_of_pins[GPIO_ID_HPD] = 6; + factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31; + factory->number_of_pins[GPIO_ID_VIP_PAD] = 0; + 
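+	/* The DDC pin count of 8 covers lines DDC1-6 plus the VGA and
+	 * I2C-pad entries in ddc_data_regs[]/ddc_clk_regs[] above; the
+	 * six HPD pins likewise match hpd_regs[]. */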
factory->number_of_pins[GPIO_ID_SYNC] = 2; + factory->number_of_pins[GPIO_ID_GSL] = 4; + + factory->funcs = &funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.h b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.h new file mode 100644 index 0000000000000000000000000000000000000000..1fd54ff8979c18f0bc15a46f1d69a472cffd9e0e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.h @@ -0,0 +1,32 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_HW_FACTORY_DCE60_H__ +#define __DAL_HW_FACTORY_DCE60_H__ + +void dal_hw_factory_dce60_init( + struct hw_factory *factory); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.c b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.c new file mode 100644 index 0000000000000000000000000000000000000000..255df31ec577c330daefaf7e3628778fbb06f322 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.c @@ -0,0 +1,411 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" + +/* + * Pre-requisites: headers required by header of this unit + */ +#include "include/gpio_types.h" +#include "../hw_translate.h" + +#include "hw_translate_dce60.h" + +#include "dce/dce_6_0_d.h" +#include "dce/dce_6_0_sh_mask.h" +#include "smu/smu_6_0_d.h" + +/* + * @brief + * Returns index of first bit (starting with LSB) which is set + */ +static uint32_t index_from_vector( + uint32_t vector) +{ + uint32_t result = 0; + uint32_t mask = 1; + + do { + if (vector == mask) + return result; + + ++result; + mask <<= 1; + } while (mask); + + BREAK_TO_DEBUGGER(); + + return GPIO_ENUM_UNKNOWN; +} + +static bool offset_to_id( + uint32_t offset, + uint32_t mask, + enum gpio_id *id, + uint32_t *en) +{ + switch (offset) { + /* GENERIC */ + case mmDC_GPIO_GENERIC_A: + *id = GPIO_ID_GENERIC; + switch (mask) { + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK: + *en = GPIO_GENERIC_A; + return true; + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK: + *en = GPIO_GENERIC_B; + return true; + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK: + *en = GPIO_GENERIC_C; + return true; + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK: + *en = GPIO_GENERIC_D; + return true; + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK: + *en = GPIO_GENERIC_E; + return true; + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK: + *en = GPIO_GENERIC_F; + return true; + case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK: + *en = GPIO_GENERIC_G; + return true; + default: + BREAK_TO_DEBUGGER(); + return false; + } + break; + /* HPD */ + case mmDC_GPIO_HPD_A: + *id = GPIO_ID_HPD; + switch (mask) { + case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK: + *en = GPIO_HPD_1; + return true; + case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK: + *en = GPIO_HPD_2; + return true; + case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK: + *en = GPIO_HPD_3; + return true; + case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK: + *en = GPIO_HPD_4; + return true; + case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK: + *en = GPIO_HPD_5; + return true; + case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK: + *en = GPIO_HPD_6; + return true; + default: + BREAK_TO_DEBUGGER(); + return false; + } + break; + /* SYNCA */ + case mmDC_GPIO_SYNCA_A: + *id = GPIO_ID_SYNC; + switch (mask) { + case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK: + *en = GPIO_SYNC_HSYNC_A; + return true; + case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK: + *en = GPIO_SYNC_VSYNC_A; + return true; + default: + BREAK_TO_DEBUGGER(); + return false; + } + break; + /* mmDC_GPIO_GENLK_MASK */ + case mmDC_GPIO_GENLK_A: + *id = GPIO_ID_GSL; + switch (mask) { + case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK: + *en = GPIO_GSL_GENLOCK_CLOCK; + return true; + case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK: + *en = GPIO_GSL_GENLOCK_VSYNC; + return true; + case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK: + *en = GPIO_GSL_SWAPLOCK_A; + return true; + case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK: + *en = GPIO_GSL_SWAPLOCK_B; + return true; + default: + BREAK_TO_DEBUGGER(); + return false; + } + break; + /* GPIOPAD */ + case mmGPIOPAD_A: + *id = GPIO_ID_GPIO_PAD; + *en = index_from_vector(mask); + return (*en <= GPIO_GPIO_PAD_MAX); + /* DDC */ + /* we don't care about the GPIO_ID for DDC + * in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK + * directly in the create method */ + case mmDC_GPIO_DDC1_A: + *en = GPIO_DDC_LINE_DDC1; + return true; + case mmDC_GPIO_DDC2_A: + *en = GPIO_DDC_LINE_DDC2; + return true; + case mmDC_GPIO_DDC3_A: + *en = GPIO_DDC_LINE_DDC3; + return true; + case mmDC_GPIO_DDC4_A: + 
*en = GPIO_DDC_LINE_DDC4; + return true; + case mmDC_GPIO_DDC5_A: + *en = GPIO_DDC_LINE_DDC5; + return true; + case mmDC_GPIO_DDC6_A: + *en = GPIO_DDC_LINE_DDC6; + return true; + case mmDC_GPIO_DDCVGA_A: + *en = GPIO_DDC_LINE_DDC_VGA; + return true; + /* GPIO_I2CPAD */ + case mmDC_GPIO_I2CPAD_A: + *en = GPIO_DDC_LINE_I2C_PAD; + return true; + /* Not implemented */ + case mmDC_GPIO_PWRSEQ_A: + case mmDC_GPIO_PAD_STRENGTH_1: + case mmDC_GPIO_PAD_STRENGTH_2: + case mmDC_GPIO_DEBUG: + return false; + /* UNEXPECTED */ + default: + BREAK_TO_DEBUGGER(); + return false; + } +} + +static bool id_to_offset( + enum gpio_id id, + uint32_t en, + struct gpio_pin_info *info) +{ + bool result = true; + + switch (id) { + case GPIO_ID_DDC_DATA: + info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK; + switch (en) { + case GPIO_DDC_LINE_DDC1: + info->offset = mmDC_GPIO_DDC1_A; + break; + case GPIO_DDC_LINE_DDC2: + info->offset = mmDC_GPIO_DDC2_A; + break; + case GPIO_DDC_LINE_DDC3: + info->offset = mmDC_GPIO_DDC3_A; + break; + case GPIO_DDC_LINE_DDC4: + info->offset = mmDC_GPIO_DDC4_A; + break; + case GPIO_DDC_LINE_DDC5: + info->offset = mmDC_GPIO_DDC5_A; + break; + case GPIO_DDC_LINE_DDC6: + info->offset = mmDC_GPIO_DDC6_A; + break; + case GPIO_DDC_LINE_DDC_VGA: + info->offset = mmDC_GPIO_DDCVGA_A; + break; + case GPIO_DDC_LINE_I2C_PAD: + info->offset = mmDC_GPIO_I2CPAD_A; + break; + default: + BREAK_TO_DEBUGGER(); + result = false; + } + break; + case GPIO_ID_DDC_CLOCK: + info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK; + switch (en) { + case GPIO_DDC_LINE_DDC1: + info->offset = mmDC_GPIO_DDC1_A; + break; + case GPIO_DDC_LINE_DDC2: + info->offset = mmDC_GPIO_DDC2_A; + break; + case GPIO_DDC_LINE_DDC3: + info->offset = mmDC_GPIO_DDC3_A; + break; + case GPIO_DDC_LINE_DDC4: + info->offset = mmDC_GPIO_DDC4_A; + break; + case GPIO_DDC_LINE_DDC5: + info->offset = mmDC_GPIO_DDC5_A; + break; + case GPIO_DDC_LINE_DDC6: + info->offset = mmDC_GPIO_DDC6_A; + break; + case GPIO_DDC_LINE_DDC_VGA: + info->offset = mmDC_GPIO_DDCVGA_A; + break; + case GPIO_DDC_LINE_I2C_PAD: + info->offset = mmDC_GPIO_I2CPAD_A; + break; + default: + BREAK_TO_DEBUGGER(); + result = false; + } + break; + case GPIO_ID_GENERIC: + info->offset = mmDC_GPIO_GENERIC_A; + switch (en) { + case GPIO_GENERIC_A: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK; + break; + case GPIO_GENERIC_B: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK; + break; + case GPIO_GENERIC_C: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK; + break; + case GPIO_GENERIC_D: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK; + break; + case GPIO_GENERIC_E: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK; + break; + case GPIO_GENERIC_F: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK; + break; + case GPIO_GENERIC_G: + info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK; + break; + default: + BREAK_TO_DEBUGGER(); + result = false; + } + break; + case GPIO_ID_HPD: + info->offset = mmDC_GPIO_HPD_A; + switch (en) { + case GPIO_HPD_1: + info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK; + break; + case GPIO_HPD_2: + info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK; + break; + case GPIO_HPD_3: + info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK; + break; + case GPIO_HPD_4: + info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK; + break; + case GPIO_HPD_5: + info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK; + break; + case GPIO_HPD_6: + info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK; + break; + default: + BREAK_TO_DEBUGGER(); + 
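+		/* None of the DCE6 HPD registers maps to this enum value;
+		 * the debug trap above fires on debug builds and the lookup
+		 * is reported as failed below. */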
result = false; + } + break; + case GPIO_ID_SYNC: + switch (en) { + case GPIO_SYNC_HSYNC_A: + info->offset = mmDC_GPIO_SYNCA_A; + info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK; + break; + case GPIO_SYNC_VSYNC_A: + info->offset = mmDC_GPIO_SYNCA_A; + info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK; + break; + case GPIO_SYNC_HSYNC_B: + case GPIO_SYNC_VSYNC_B: + default: + BREAK_TO_DEBUGGER(); + result = false; + } + break; + case GPIO_ID_GSL: + switch (en) { + case GPIO_GSL_GENLOCK_CLOCK: + info->offset = mmDC_GPIO_GENLK_A; + info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK; + break; + case GPIO_GSL_GENLOCK_VSYNC: + info->offset = mmDC_GPIO_GENLK_A; + info->mask = + DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK; + break; + case GPIO_GSL_SWAPLOCK_A: + info->offset = mmDC_GPIO_GENLK_A; + info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK; + break; + case GPIO_GSL_SWAPLOCK_B: + info->offset = mmDC_GPIO_GENLK_A; + info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK; + break; + default: + BREAK_TO_DEBUGGER(); + result = false; + } + break; + case GPIO_ID_GPIO_PAD: + info->offset = mmGPIOPAD_A; + info->mask = (1 << en); + result = (info->mask <= GPIO_GPIO_PAD_MAX); + break; + case GPIO_ID_VIP_PAD: + default: + BREAK_TO_DEBUGGER(); + result = false; + } + + if (result) { + info->offset_y = info->offset + 2; + info->offset_en = info->offset + 1; + info->offset_mask = info->offset - 1; + + info->mask_y = info->mask; + info->mask_en = info->mask; + info->mask_mask = info->mask; + } + + return result; +} + +static const struct hw_translate_funcs funcs = { + .offset_to_id = offset_to_id, + .id_to_offset = id_to_offset, +}; + +void dal_hw_translate_dce60_init( + struct hw_translate *translate) +{ + translate->funcs = &funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.h b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.h new file mode 100644 index 0000000000000000000000000000000000000000..1e811f35cec776da40d88529135d0a0598ea77da --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.h @@ -0,0 +1,32 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_HW_TRANSLATE_DCE60_H__ +#define __DAL_HW_TRANSLATE_DCE60_H__ + +void dal_hw_translate_dce60_init( + struct hw_translate *tr); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index e5cfe28bc7bfc45229add0e02dfedee278a9c26a..6fc8a6e9dc156409bc340658f984b04e2b70df1d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -42,6 +42,9 @@ * Post-requisites: headers required by this unit */ +#if defined(CONFIG_DRM_AMD_DC_SI) +#include "dce60/hw_factory_dce60.h" +#endif #include "dce80/hw_factory_dce80.h" #include "dce110/hw_factory_dce110.h" #include "dce120/hw_factory_dce120.h" @@ -71,6 +74,13 @@ bool dal_hw_factory_init( } switch (dce_version) { +#if defined(CONFIG_DRM_AMD_DC_SI) + case DCE_VERSION_6_0: + case DCE_VERSION_6_1: + case DCE_VERSION_6_4: + dal_hw_factory_dce60_init(factory); + return true; +#endif case DCE_VERSION_8_0: case DCE_VERSION_8_1: case DCE_VERSION_8_3: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index efea7cb0f17c109dee95cc2a41205d72bdbd54da..3a93c945e57d285ce1fba00bdf307be44d4c2495 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -40,6 +40,9 @@ * Post-requisites: headers required by this unit */ +#if defined(CONFIG_DRM_AMD_DC_SI) +#include "dce60/hw_translate_dce60.h" +#endif #include "dce80/hw_translate_dce80.h" #include "dce110/hw_translate_dce110.h" #include "dce120/hw_translate_dce120.h" @@ -69,6 +72,13 @@ bool dal_hw_translate_init( } switch (dce_version) { +#if defined(CONFIG_DRM_AMD_DC_SI) + case DCE_VERSION_6_0: + case DCE_VERSION_6_1: + case DCE_VERSION_6_4: + dal_hw_translate_dce60_init(translate); + return true; +#endif case DCE_VERSION_8_0: case DCE_VERSION_8_1: case DCE_VERSION_8_3: diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 5994d2a33c40be33f43414b3391fe76803c6a8c1..947d6106f3413be62f8349faf82b8659d014386a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -230,6 +230,8 @@ struct clk_mgr_funcs { int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr); + void (*set_low_power_state)(struct clk_mgr *clk_mgr); + void (*init_clocks)(struct clk_mgr *clk_mgr); void (*enable_pme_wa) (struct clk_mgr *clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 72743058836da141eaff463fad2efd62a81b8754..949b61351edeac1a40c97f45007fe4352655c433 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -89,6 +89,11 @@ enum dentist_divider_range { .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \ .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL +#if defined(CONFIG_DRM_AMD_DC_SI) +#define CLK_COMMON_REG_LIST_DCE60_BASE() \ + SR(DENTIST_DISPCLK_CNTL) +#endif + #define CLK_COMMON_REG_LIST_DCN_BASE() \ SR(DENTIST_DISPCLK_CNTL) @@ -115,6 +120,12 @@ enum dentist_divider_range { CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh) +#if defined(CONFIG_DRM_AMD_DC_SI) +#define CLK_COMMON_MASK_SH_LIST_DCE60_COMMON_BASE(mask_sh) \ + CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\ + 
CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh) +#endif + #define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh) diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h index f9ab5abb64623fe07ceab8db676a539baef747dd..48eac622c6a0f67185c43c462299269c6f4e6015 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h @@ -49,6 +49,7 @@ struct panel_cntl_funcs { void (*store_backlight_level)(struct panel_cntl *panel_cntl); void (*driver_set_backlight)(struct panel_cntl *panel_cntl, uint32_t backlight_pwm_u16_16); + uint32_t (*get_current_backlight)(struct panel_cntl *panel_cntl); }; struct panel_cntl_init_data { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 3c986717dcd5621f14218724106c1c59ec953842..64c1be818b0e8e017f5dae7b4dbc326130d68310 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -67,6 +67,10 @@ struct hw_sequencer_funcs { int num_planes, struct dc_state *context); void (*program_front_end_for_ctx)(struct dc *dc, struct dc_state *context); + bool (*disconnect_pipes)(struct dc *dc, + struct dc_state *context); + void (*wait_for_pending_cleared)(struct dc *dc, + struct dc_state *context); void (*post_unlock_program_front_end)(struct dc *dc, struct dc_state *context); void (*update_plane_addr)(const struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile index 3352b79fb1cbc3c4a63332283e189c99115b34aa..405c253226077ecaaee95ff294b99f074a12805b 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/Makefile +++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile @@ -30,6 +30,17 @@ AMD_DAL_IRQ = $(addprefix $(AMDDALPATH)/dc/irq/,$(IRQ)) AMD_DISPLAY_FILES += $(AMD_DAL_IRQ) +############################################################################### +# DCE 6x +############################################################################### +ifdef CONFIG_DRM_AMD_DC_SI +IRQ_DCE60 = irq_service_dce60.o + +AMD_DAL_IRQ_DCE60 = $(addprefix $(AMDDALPATH)/dc/irq/dce60/,$(IRQ_DCE60)) + +AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE60) +endif + ############################################################################### # DCE 8x ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c new file mode 100644 index 0000000000000000000000000000000000000000..524481885fd0adb2e3d8ff89722161f893430941 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c @@ -0,0 +1,395 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial 
portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/slab.h> + +#include "dm_services.h" + +#include "include/logger_interface.h" + +#include "irq_service_dce60.h" +#include "../dce110/irq_service_dce110.h" + +#include "dce/dce_6_0_d.h" +#include "dce/dce_6_0_sh_mask.h" + +#include "ivsrcid/ivsrcid_vislands30.h" + +#define VISLANDS30_IV_SRCID_D1_VBLANK 1 +#define VISLANDS30_IV_SRCID_D2_VBLANK 2 +#define VISLANDS30_IV_SRCID_D3_VBLANK 3 +#define VISLANDS30_IV_SRCID_D4_VBLANK 4 +#define VISLANDS30_IV_SRCID_D5_VBLANK 5 +#define VISLANDS30_IV_SRCID_D6_VBLANK 6 + +#include "dc_types.h" + +static bool hpd_ack( + struct irq_service *irq_service, + const struct irq_source_info *info) +{ + uint32_t addr = info->status_reg; + uint32_t value = dm_read_reg(irq_service->ctx, addr); + uint32_t current_status = + get_reg_field_value( + value, + DC_HPD1_INT_STATUS, + DC_HPD1_SENSE_DELAYED); + + dal_irq_service_ack_generic(irq_service, info); + + value = dm_read_reg(irq_service->ctx, info->enable_reg); + + set_reg_field_value( + value, + current_status ? 0 : 1, + DC_HPD1_INT_CONTROL, + DC_HPD1_INT_POLARITY); + + dm_write_reg(irq_service->ctx, info->enable_reg, value); + + return true; +} + +static const struct irq_source_info_funcs hpd_irq_info_funcs = { + .set = NULL, + .ack = hpd_ack +}; + +static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs pflip_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs vblank_irq_info_funcs = { + .set = dce110_vblank_set, + .ack = NULL +}; + +static const struct irq_source_info_funcs vblank_irq_info_funcs_dce60 = { + .set = NULL, + .ack = NULL +}; + +#define hpd_int_entry(reg_num)\ + [DC_IRQ_SOURCE_INVALID + reg_num] = {\ + .enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\ + .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\ + .enable_value = {\ + DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\ + ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK\ + },\ + .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\ + .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\ + .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\ + .status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\ + .funcs = &hpd_irq_info_funcs\ + } + +#define hpd_rx_int_entry(reg_num)\ + [DC_IRQ_SOURCE_HPD6 + reg_num] = {\ + .enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\ + .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\ + .enable_value = {\ + DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\ + ~DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK },\ + .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\ + .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\ + .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\ + .status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\ + .funcs = &hpd_rx_irq_info_funcs\ + } + +#define pflip_int_entry(reg_num)\ + [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\ + .enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\ + .enable_mask =\ +
GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\ + .enable_value = {\ + GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\ + ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\ + .ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\ + .ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\ + .ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\ + .status_reg = mmDCP ## reg_num ##_GRPH_INTERRUPT_STATUS,\ + .funcs = &pflip_irq_info_funcs\ + } + +#define vupdate_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ + .enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\ + .enable_mask =\ + CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\ + .enable_value = {\ + CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\ + ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\ + .ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\ + .ack_mask =\ + CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\ + .ack_value =\ + CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\ + .funcs = &vblank_irq_info_funcs\ + } + +#define vblank_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\ + .enable_reg = mmLB ## reg_num ## _INT_MASK,\ + .enable_mask =\ + INT_MASK__VBLANK_INT_MASK,\ + .enable_value = {\ + INT_MASK__VBLANK_INT_MASK,\ + ~INT_MASK__VBLANK_INT_MASK},\ + .ack_reg = mmLB ## reg_num ## _VBLANK_STATUS,\ + .ack_mask =\ + VBLANK_STATUS__VBLANK_ACK_MASK,\ + .ack_value =\ + VBLANK_STATUS__VBLANK_ACK_MASK,\ + .funcs = &vblank_irq_info_funcs_dce60\ + } + +#define dummy_irq_entry() \ + {\ + .funcs = &dummy_irq_info_funcs\ + } + +#define i2c_int_entry(reg_num) \ + [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry() + +#define dp_sink_int_entry(reg_num) \ + [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry() + +#define gpio_pad_int_entry(reg_num) \ + [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry() + +#define dc_underflow_int_entry(reg_num) \ + [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry() + + +static const struct irq_source_info_funcs dummy_irq_info_funcs = { + .set = dal_irq_service_dummy_set, + .ack = dal_irq_service_dummy_ack +}; + +static const struct irq_source_info +irq_source_info_dce60[DAL_IRQ_SOURCES_NUMBER] = { + [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(), + hpd_int_entry(1), + hpd_int_entry(2), + hpd_int_entry(3), + hpd_int_entry(4), + hpd_int_entry(5), + hpd_int_entry(6), + hpd_rx_int_entry(1), + hpd_rx_int_entry(2), + hpd_rx_int_entry(3), + hpd_rx_int_entry(4), + hpd_rx_int_entry(5), + hpd_rx_int_entry(6), + i2c_int_entry(1), + i2c_int_entry(2), + i2c_int_entry(3), + i2c_int_entry(4), + i2c_int_entry(5), + i2c_int_entry(6), + dp_sink_int_entry(1), + dp_sink_int_entry(2), + dp_sink_int_entry(3), + dp_sink_int_entry(4), + dp_sink_int_entry(5), + dp_sink_int_entry(6), + [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(), + pflip_int_entry(0), + pflip_int_entry(1), + pflip_int_entry(2), + pflip_int_entry(3), + pflip_int_entry(4), + pflip_int_entry(5), + [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(), + gpio_pad_int_entry(0), + gpio_pad_int_entry(1), + gpio_pad_int_entry(2), + gpio_pad_int_entry(3), + gpio_pad_int_entry(4), + gpio_pad_int_entry(5), + gpio_pad_int_entry(6), + gpio_pad_int_entry(7), + gpio_pad_int_entry(8), + gpio_pad_int_entry(9), + gpio_pad_int_entry(10), + gpio_pad_int_entry(11), + gpio_pad_int_entry(12), + gpio_pad_int_entry(13), + gpio_pad_int_entry(14), + gpio_pad_int_entry(15), + gpio_pad_int_entry(16), + gpio_pad_int_entry(17), + gpio_pad_int_entry(18), + gpio_pad_int_entry(19), + 
gpio_pad_int_entry(20), + gpio_pad_int_entry(21), + gpio_pad_int_entry(22), + gpio_pad_int_entry(23), + gpio_pad_int_entry(24), + gpio_pad_int_entry(25), + gpio_pad_int_entry(26), + gpio_pad_int_entry(27), + gpio_pad_int_entry(28), + gpio_pad_int_entry(29), + gpio_pad_int_entry(30), + dc_underflow_int_entry(1), + dc_underflow_int_entry(2), + dc_underflow_int_entry(3), + dc_underflow_int_entry(4), + dc_underflow_int_entry(5), + dc_underflow_int_entry(6), + [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), + [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), + vupdate_int_entry(0), + vupdate_int_entry(1), + vupdate_int_entry(2), + vupdate_int_entry(3), + vupdate_int_entry(4), + vupdate_int_entry(5), + vblank_int_entry(0), + vblank_int_entry(1), + vblank_int_entry(2), + vblank_int_entry(3), + vblank_int_entry(4), + vblank_int_entry(5), +}; + +enum dc_irq_source to_dal_irq_source_dce60( + struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) +{ + switch (src_id) { + case VISLANDS30_IV_SRCID_D1_VBLANK: + return DC_IRQ_SOURCE_VBLANK1; + case VISLANDS30_IV_SRCID_D2_VBLANK: + return DC_IRQ_SOURCE_VBLANK2; + case VISLANDS30_IV_SRCID_D3_VBLANK: + return DC_IRQ_SOURCE_VBLANK3; + case VISLANDS30_IV_SRCID_D4_VBLANK: + return DC_IRQ_SOURCE_VBLANK4; + case VISLANDS30_IV_SRCID_D5_VBLANK: + return DC_IRQ_SOURCE_VBLANK5; + case VISLANDS30_IV_SRCID_D6_VBLANK: + return DC_IRQ_SOURCE_VBLANK6; + case VISLANDS30_IV_SRCID_D1_V_UPDATE_INT: + return DC_IRQ_SOURCE_VUPDATE1; + case VISLANDS30_IV_SRCID_D2_V_UPDATE_INT: + return DC_IRQ_SOURCE_VUPDATE2; + case VISLANDS30_IV_SRCID_D3_V_UPDATE_INT: + return DC_IRQ_SOURCE_VUPDATE3; + case VISLANDS30_IV_SRCID_D4_V_UPDATE_INT: + return DC_IRQ_SOURCE_VUPDATE4; + case VISLANDS30_IV_SRCID_D5_V_UPDATE_INT: + return DC_IRQ_SOURCE_VUPDATE5; + case VISLANDS30_IV_SRCID_D6_V_UPDATE_INT: + return DC_IRQ_SOURCE_VUPDATE6; + case VISLANDS30_IV_SRCID_D1_GRPH_PFLIP: + return DC_IRQ_SOURCE_PFLIP1; + case VISLANDS30_IV_SRCID_D2_GRPH_PFLIP: + return DC_IRQ_SOURCE_PFLIP2; + case VISLANDS30_IV_SRCID_D3_GRPH_PFLIP: + return DC_IRQ_SOURCE_PFLIP3; + case VISLANDS30_IV_SRCID_D4_GRPH_PFLIP: + return DC_IRQ_SOURCE_PFLIP4; + case VISLANDS30_IV_SRCID_D5_GRPH_PFLIP: + return DC_IRQ_SOURCE_PFLIP5; + case VISLANDS30_IV_SRCID_D6_GRPH_PFLIP: + return DC_IRQ_SOURCE_PFLIP6; + + case VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A: + /* generic src_id for all HPD and HPDRX interrupts */ + switch (ext_id) { + case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A: + return DC_IRQ_SOURCE_HPD1; + case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B: + return DC_IRQ_SOURCE_HPD2; + case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C: + return DC_IRQ_SOURCE_HPD3; + case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D: + return DC_IRQ_SOURCE_HPD4; + case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E: + return DC_IRQ_SOURCE_HPD5; + case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F: + return DC_IRQ_SOURCE_HPD6; + case VISLANDS30_IV_EXTID_HPD_RX_A: + return DC_IRQ_SOURCE_HPD1RX; + case VISLANDS30_IV_EXTID_HPD_RX_B: + return DC_IRQ_SOURCE_HPD2RX; + case VISLANDS30_IV_EXTID_HPD_RX_C: + return DC_IRQ_SOURCE_HPD3RX; + case VISLANDS30_IV_EXTID_HPD_RX_D: + return DC_IRQ_SOURCE_HPD4RX; + case VISLANDS30_IV_EXTID_HPD_RX_E: + return DC_IRQ_SOURCE_HPD5RX; + case VISLANDS30_IV_EXTID_HPD_RX_F: + return DC_IRQ_SOURCE_HPD6RX; + default: + return DC_IRQ_SOURCE_INVALID; + } + break; + + default: + return DC_IRQ_SOURCE_INVALID; + } +} + +static const struct irq_service_funcs irq_service_funcs_dce60 = { + .to_dal_irq_source = to_dal_irq_source_dce60 +}; + +static void dce60_irq_construct( + struct 
irq_service *irq_service, + struct irq_service_init_data *init_data) +{ + dal_irq_service_construct(irq_service, init_data); + + irq_service->info = irq_source_info_dce60; + irq_service->funcs = &irq_service_funcs_dce60; +} + +struct irq_service *dal_irq_service_dce60_create( + struct irq_service_init_data *init_data) +{ + struct irq_service *irq_service = kzalloc(sizeof(*irq_service), + GFP_KERNEL); + + if (!irq_service) + return NULL; + + dce60_irq_construct(irq_service, init_data); + return irq_service; +} + + diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.h b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.h new file mode 100644 index 0000000000000000000000000000000000000000..294db29e81157a4adfe1d58e68aa42b79adab7c4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.h @@ -0,0 +1,40 @@ +/* + * Copyright 2020 Mauro Rossi + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_IRQ_SERVICE_DCE60_H__ +#define __DAL_IRQ_SERVICE_DCE60_H__ + +#include "../irq_service.h" + +enum dc_irq_source to_dal_irq_source_dce60( + struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id); + +struct irq_service *dal_irq_service_dce60_create( + struct irq_service_init_data *init_data); + +#endif + diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index 33053b9fe6bd1879e79f3206fe790128fe0ea7ed..6bf27bde87240b1b4b87565d9e0c9c1b856d67b0 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -32,6 +32,9 @@ #include "dce110/irq_service_dce110.h" +#if defined(CONFIG_DRM_AMD_DC_SI) +#include "dce60/irq_service_dce60.h" +#endif #include "dce80/irq_service_dce80.h" diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index c3bbfe397e8df57f884c5edfe9f303c8aff9f74d..330acaaed79aef8a9264bf6a87742fff8fe41faa 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -55,6 +55,10 @@ #include <asm/fpu/api.h> #define DC_FP_START() kernel_fpu_begin() #define DC_FP_END() kernel_fpu_end() +#elif defined(CONFIG_ARM64) +#include <asm/neon.h> +#define DC_FP_START() kernel_neon_begin() +#define DC_FP_END() kernel_neon_end() #elif defined(CONFIG_PPC64) #include <asm/switch_to.h> #include <asm/cputable.h> diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c index b8040da94b9db649626d6ca0e7aa4d85d3bad4d7..944c0327763c1c940d6aee8e01c3c9cc1c113339 100644 --- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c @@ -87,6 +87,17 @@ static void virtual_enc_dp_set_odm_combine( bool odm_combine) {} +static void virtual_dig_connect_to_otg( + struct stream_encoder *enc, + int tg_inst) +{} + +static void virtual_setup_stereo_sync( + struct stream_encoder *enc, + int tg_inst, + bool enable) +{} + static const struct stream_encoder_funcs virtual_str_enc_funcs = { .dp_set_odm_combine = virtual_enc_dp_set_odm_combine, @@ -114,6 +125,8 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = { .audio_mute_control = virtual_audio_mute_control, .set_avmute = virtual_stream_encoder_set_avmute, .hdmi_reset_stream_attribute = virtual_stream_encoder_reset_hdmi_stream_attribute, + .dig_connect_to_otg = virtual_dig_connect_to_otg, + .setup_stereo_sync = virtual_setup_stereo_sync, }; bool virtual_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index e013875b89ed4f59d46ccfd9a3df7d933d3106bf..d7e7f2eda92fd80bc2a1ec3338554cabbd90625c 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -36,11 +36,20 @@ /* Firmware versioning.
*/ #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0xe6d590b09 +#define DMUB_FW_VERSION_GIT_HASH 0x4e5b2f46f #define DMUB_FW_VERSION_MAJOR 0 #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 25 -#define DMUB_FW_VERSION_UCODE ((DMUB_FW_VERSION_MAJOR << 24) | (DMUB_FW_VERSION_MINOR << 16) | DMUB_FW_VERSION_REVISION) +#define DMUB_FW_VERSION_REVISION 29 +#define DMUB_FW_VERSION_TEST 0 +#define DMUB_FW_VERSION_VBIOS 0 +#define DMUB_FW_VERSION_HOTFIX 0 +#define DMUB_FW_VERSION_UCODE (((DMUB_FW_VERSION_MAJOR & 0xFF) << 24) | \ + ((DMUB_FW_VERSION_MINOR & 0xFF) << 16) | \ + ((DMUB_FW_VERSION_REVISION & 0xFF) << 8) | \ + ((DMUB_FW_VERSION_TEST & 0x1) << 7) | \ + ((DMUB_FW_VERSION_VBIOS & 0x1) << 6) | \ + (DMUB_FW_VERSION_HOTFIX & 0x3F)) + #endif //================================================================== @@ -204,6 +213,7 @@ enum dmub_cmd_vbios_type { DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL = 1, DMUB_CMD__VBIOS_SET_PIXEL_CLOCK = 2, DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3, + DMUB_CMD__VBIOS_LVTMA_CONTROL = 15, }; //============================================================================== diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h index c30437ae839532bf1509e38ef2d959122d27e9ea..21011edea337b9cb5d62981ddb55988b460fa118 100644 --- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h +++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h @@ -101,6 +101,13 @@ enum bp_pipe_control_action { ASIC_PIPE_INIT }; +enum bp_lvtma_control_action { + LVTMA_CONTROL_LCD_BLOFF = 2, + LVTMA_CONTROL_LCD_BLON = 3, + LVTMA_CONTROL_POWER_ON = 12, + LVTMA_CONTROL_POWER_OFF = 13 +}; + struct bp_encoder_control { enum bp_encoder_control_action action; enum engine_id engine_id; diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index abeb58d544b11f52bcca22fb379d38b37d789d04..b267987aed068e69f03634f90da72d4c28e39e66 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -30,6 +30,34 @@ * ASIC internal revision ID */ +/* DCE60 (based on si_id.h in GPUOpen-Tools CodeXL) */ +#define SI_TAHITI_P_A0 0x01 +#define SI_TAHITI_P_B0 0x05 +#define SI_TAHITI_P_B1 0x06 +#define SI_PITCAIRN_PM_A0 0x14 +#define SI_PITCAIRN_PM_A1 0x15 +#define SI_CAPEVERDE_M_A0 0x28 +#define SI_CAPEVERDE_M_A1 0x29 +#define SI_OLAND_M_A0 0x3C +#define SI_HAINAN_V_A0 0x46 + +#define SI_UNKNOWN 0xFF + +#define ASIC_REV_IS_TAHITI_P(rev) \ + ((rev >= SI_TAHITI_P_A0) && (rev < SI_PITCAIRN_PM_A0)) + +#define ASIC_REV_IS_PITCAIRN_PM(rev) \ + ((rev >= SI_PITCAIRN_PM_A0) && (rev < SI_CAPEVERDE_M_A0)) + +#define ASIC_REV_IS_CAPEVERDE_M(rev) \ + ((rev >= SI_CAPEVERDE_M_A0) && (rev < SI_OLAND_M_A0)) + +#define ASIC_REV_IS_OLAND_M(rev) \ + ((rev >= SI_OLAND_M_A0) && (rev < SI_HAINAN_V_A0)) + +#define ASIC_REV_IS_HAINAN_V(rev) \ + ((rev >= SI_HAINAN_V_A0) && (rev < SI_UNKNOWN)) + /* DCE80 (based on ci_id.h in Perforce) */ #define CI_BONAIRE_M_A0 0x14 #define CI_BONAIRE_M_A1 0x15 @@ -181,6 +209,17 @@ enum { /* * ASIC chip ID */ + +/* DCE60 */ +#define DEVICE_ID_SI_TAHITI_P_6780 0x6780 +#define DEVICE_ID_SI_PITCAIRN_PM_6800 0x6800 +#define DEVICE_ID_SI_PITCAIRN_PM_6808 0x6808 +#define DEVICE_ID_SI_CAPEVERDE_M_6820 0x6820 +#define DEVICE_ID_SI_CAPEVERDE_M_6828 0x6828 +#define DEVICE_ID_SI_OLAND_M_6600 0x6600 +#define DEVICE_ID_SI_OLAND_M_6608 0x6608 +#define DEVICE_ID_SI_HAINAN_V_6660 0x6660 + /* DCE80 */ 
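+/* Illustrative use of the DCE60 revision ranges above (a sketch only, with
+ * dce_version as a hypothetical local): rev 0x06 (SI_TAHITI_P_B1) satisfies
+ * ASIC_REV_IS_TAHITI_P() since SI_TAHITI_P_A0 (0x01) <= 0x06 <
+ * SI_PITCAIRN_PM_A0 (0x14), and the five ranges tile
+ * [SI_TAHITI_P_A0, SI_UNKNOWN) contiguously, so exactly one check matches:
+ *
+ *	if (ASIC_REV_IS_TAHITI_P(asic_rev))
+ *		dce_version = DCE_VERSION_6_0;
+ *
+ * DCE_VERSION_6_0 is the enum value added to dal_types.h below.
+ */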
#define DEVICE_ID_KALINDI_9834 0x9834 #define DEVICE_ID_TEMASH_9839 0x9839 @@ -190,6 +229,7 @@ enum { #define DEVICE_ID_RENOIR_1636 0x1636 /* Asic Family IDs for different asic family. */ +#define FAMILY_SI 110 /* Southern Islands: Tahiti (P), Pitcairn (PM), Cape Verde (M), Oland (M), Hainan (V) */ #define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */ #define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */ #define FAMILY_VI 130 /* Volcanic Islands: Iceland (V), Tonga (M) */ diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h index b67c9fa6b9cd4351847a2a119594188305c3125a..8aaa3af692021cb39123a0af78bbfdc0e616651e 100644 --- a/drivers/gpu/drm/amd/display/include/dal_types.h +++ b/drivers/gpu/drm/amd/display/include/dal_types.h @@ -34,6 +34,9 @@ struct dc_bios; enum dce_version { DCE_VERSION_UNKNOWN = (-1), + DCE_VERSION_6_0, + DCE_VERSION_6_1, + DCE_VERSION_6_4, DCE_VERSION_8_0, DCE_VERSION_8_1, DCE_VERSION_8_3, diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index 89ef9f6860e5b7eb2cc530da0aaf74804731e93f..16df2a485dd0db64ea4c06f29ffaf85e62daf20f 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg); */ static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2) { + if (arg1.value == 0) + return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero; + return dc_fixpt_exp( dc_fixpt_mul( dc_fixpt_log(arg1), diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 81820f3d6b3b473c9fe9da2f84fd0fa8204f0458..d988533d4af5f4a43936ed1bbfacdd8a6643f85c 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -324,22 +324,44 @@ static void apply_below_the_range(struct core_freesync *core_freesync, /* Choose number of frames to insert based on how close it * can get to the mid point of the variable range. + * - Delta for CEIL: delta_from_mid_point_in_us_1 + * - Delta for FLOOR: delta_from_mid_point_in_us_2 */ - if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us && - (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 || - mid_point_frames_floor < 2)) { + if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) { + /* Check for out of range. + * If using CEIL produces a value that is out of range, + * then we are forced to use FLOOR. + */ + frames_to_insert = mid_point_frames_floor; + } else if (mid_point_frames_floor < 2) { + /* Check if FLOOR would result in non-LFC. In this case + * choose to use CEIL + */ + frames_to_insert = mid_point_frames_ceil; + } else if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) { + /* If choosing CEIL results in a frame duration that is + * closer to the mid point of the range. + * Choose CEIL + */ frames_to_insert = mid_point_frames_ceil; - delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 - - delta_from_mid_point_in_us_1; } else { + /* If choosing FLOOR results in a frame duration that is + * closer to the mid point of the range. 
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 81820f3d6b3b473c9fe9da2f84fd0fa8204f0458..d988533d4af5f4a43936ed1bbfacdd8a6643f85c 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -324,22 +324,44 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 
 	/* Choose number of frames to insert based on how close it
 	 * can get to the mid point of the variable range.
+	 * - Delta for CEIL: delta_from_mid_point_in_us_1
+	 * - Delta for FLOOR: delta_from_mid_point_in_us_2
 	 */
-	if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
-			(delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
-			mid_point_frames_floor < 2)) {
+	if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
+		/* Check for out of range.
+		 * If using CEIL produces a value that is out of range,
+		 * then we are forced to use FLOOR.
+		 */
+		frames_to_insert = mid_point_frames_floor;
+	} else if (mid_point_frames_floor < 2) {
+		/* Check if FLOOR would result in non-LFC. In this case,
+		 * choose CEIL.
+		 */
+		frames_to_insert = mid_point_frames_ceil;
+	} else if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+		/* If choosing CEIL results in a frame duration that is
+		 * closer to the mid point of the range, choose CEIL.
+		 */
 		frames_to_insert = mid_point_frames_ceil;
-		delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
-				delta_from_mid_point_in_us_1;
 	} else {
+		/* If choosing FLOOR results in a frame duration that is
+		 * closer to the mid point of the range, choose FLOOR.
+		 */
 		frames_to_insert = mid_point_frames_floor;
-		delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
-				delta_from_mid_point_in_us_2;
 	}
 
 	/* Prefer current frame multiplier when BTR is enabled unless it drifts
 	 * too far from the midpoint
 	 */
+	if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+		delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
+				delta_from_mid_point_in_us_1;
+	} else {
+		delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
+				delta_from_mid_point_in_us_2;
+	}
 	if (in_out_vrr->btr.frames_to_insert != 0 &&
 			delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
 		if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 13c57ff2abdce9c3aeb235fac4d2f845b20b00d9..1ab813b4fd14f8c4bb1f1d59177b15b1a843d6b4 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -37,6 +37,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 		struct dc_info_packet *info_packet);
 
 void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
-		struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
+		struct dc_info_packet *info_packet);
 
 #endif
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 7cd8a43d188962c45d92b05042f59bb4a4d006ec..0fdf7a3e96deaca5c5e5a0f0f9fb3cf1b04d6d95 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -421,15 +421,13 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
  *****************************************************************************
  */
 void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
-		struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue)
+		struct dc_info_packet *info_packet)
 {
 	unsigned int length = 5;
 	bool hdmi_vic_mode = false;
 	uint8_t checksum = 0;
 	uint32_t i = 0;
 	enum dc_timing_3d_format format;
-	bool bALLM = (bool)ALLMEnabled;
-	bool bALLMVal = (bool)ALLMValue;
 
 	info_packet->valid = false;
 	format = stream->timing.timing_3d_format;
@@ -442,20 +440,13 @@ void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
 			&& format == TIMING_3D_FORMAT_NONE)
 		hdmi_vic_mode = true;
 
-	if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode && !bALLM)
+	if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode)
 		return;
 
 	info_packet->sb[1] = 0x03;
 	info_packet->sb[2] = 0x0C;
 	info_packet->sb[3] = 0x00;
 
-	if (bALLM) {
-		info_packet->sb[1] = 0xD8;
-		info_packet->sb[2] = 0x5D;
-		info_packet->sb[3] = 0xC4;
-		info_packet->sb[4] = HF_VSIF_VERSION;
-	}
-
 	if (format != TIMING_3D_FORMAT_NONE)
 		info_packet->sb[4] = (2 << 5);
 
@@ -490,9 +481,6 @@ void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
 	info_packet->hb1 = 0x01;
 	info_packet->hb2 = (uint8_t) (length);
 
-	if (bALLM)
-		info_packet->sb[5] = (info_packet->sb[5] & ~0x02) | (bALLMVal << 1);
-
 	checksum += info_packet->hb0;
 	checksum += info_packet->hb1;
 	checksum += info_packet->hb2;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 859724771a75dc6a7b4dc1e8c5ee3eaff7e2dbf0..61497954e67e52ab0870b233e14ab8309a3cf87b 100644
---
a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -657,7 +657,7 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame params, ram_table, big_endian); } -bool dmub_init_abm_config(struct abm *abm, +bool dmub_init_abm_config(struct resource_pool *res_pool, struct dmcu_iram_parameters params) { struct iram_table_v_2_2 ram_table; @@ -665,8 +665,13 @@ bool dmub_init_abm_config(struct abm *abm, bool result = false; uint32_t i, j = 0; - if (abm == NULL) +#if defined(CONFIG_DRM_AMD_DC_DCN3_0) + if (res_pool->abm == NULL && res_pool->multiple_abms[0] == NULL) return false; +#else + if (res_pool->abm == NULL) + return false; +#endif memset(&ram_table, 0, sizeof(ram_table)); memset(&config, 0, sizeof(config)); @@ -707,8 +712,14 @@ bool dmub_init_abm_config(struct abm *abm, config.min_abm_backlight = ram_table.min_abm_backlight; - result = abm->funcs->init_abm_config( - abm, (char *)(&config), sizeof(struct abm_config_table)); +#if defined(CONFIG_DRM_AMD_DC_DCN3_0) + if (res_pool->multiple_abms[0]) { + result = res_pool->multiple_abms[0]->funcs->init_abm_config( + res_pool->multiple_abms[0], (char *)(&config), sizeof(struct abm_config_table)); + } else +#endif + result = res_pool->abm->funcs->init_abm_config( + res_pool->abm, (char *)(&config), sizeof(struct abm_config_table)); return result; } diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index 46fbca2e2cd1c850a68e62c1e87fdaa844b2cf40..fa4728d880920a47226ee44aa5964cc96e2b688a 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -28,6 +28,8 @@ #include "dc/inc/hw/dmcu.h" #include "dc/inc/hw/abm.h" +struct resource_pool; + enum abm_defines { abm_defines_max_level = 4, @@ -45,7 +47,7 @@ struct dmcu_iram_parameters { bool dmcu_load_iram(struct dmcu *dmcu, struct dmcu_iram_parameters params); -bool dmub_init_abm_config(struct abm *abm, +bool dmub_init_abm_config(struct resource_pool *res_pool, struct dmcu_iram_parameters params); #endif /* MODULES_POWER_POWER_HELPERS_H_ */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h index 27bb8c1ab85876bed4ad23bfc6df7648eaa8a851..b6f74bf4af023fd53ee79ce44ecb29cf9f8cb43b 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h @@ -7376,8 +7376,6 @@ #define mmCRTC4_CRTC_DRR_CONTROL 0x0f3e #define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX 2 -#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x395d -#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2 // addressBlock: dce_dc_fmt4_dispdec // base address: 0x2000 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h index ae798f7688534f55ea15570f996cb59bf638babf..9de01ae574c035a0b99ca09944b9c8341546099a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h @@ -4444,14 +4444,90 @@ /* Registers that spilled out of sid.h */ #define mmDATA_FORMAT 0x1AC0 +#define mmLB0_DATA_FORMAT 0x1AC0 +#define mmLB1_DATA_FORMAT 0x1DC0 +#define mmLB2_DATA_FORMAT 0x40C0 +#define mmLB3_DATA_FORMAT 0x43C0 +#define mmLB4_DATA_FORMAT 0x46C0 +#define mmLB5_DATA_FORMAT 0x49C0 #define mmDESKTOP_HEIGHT 0x1AC1 +#define mmLB0_DESKTOP_HEIGHT 0x1AC1 +#define 
mmLB1_DESKTOP_HEIGHT 0x1DC1 +#define mmLB2_DESKTOP_HEIGHT 0x40C1 +#define mmLB3_DESKTOP_HEIGHT 0x43C1 +#define mmLB4_DESKTOP_HEIGHT 0x46C1 +#define mmLB5_DESKTOP_HEIGHT 0x49C1 #define mmDC_LB_MEMORY_SPLIT 0x1AC3 +#define mmLB0_DC_LB_MEMORY_SPLIT 0x1AC3 +#define mmLB1_DC_LB_MEMORY_SPLIT 0x1DC3 +#define mmLB2_DC_LB_MEMORY_SPLIT 0x40C3 +#define mmLB3_DC_LB_MEMORY_SPLIT 0x43C3 +#define mmLB4_DC_LB_MEMORY_SPLIT 0x46C3 +#define mmLB5_DC_LB_MEMORY_SPLIT 0x49C3 +#define mmDC_LB_MEM_SIZE 0x1AC4 +#define mmLB0_DC_LB_MEM_SIZE 0x1AC4 +#define mmLB1_DC_LB_MEM_SIZE 0x1DC4 +#define mmLB2_DC_LB_MEM_SIZE 0x40C4 +#define mmLB3_DC_LB_MEM_SIZE 0x43C4 +#define mmLB4_DC_LB_MEM_SIZE 0x46C4 +#define mmLB5_DC_LB_MEM_SIZE 0x49C4 #define mmPRIORITY_A_CNT 0x1AC6 +#define mmLB0_PRIORITY_A_CNT 0x1AC6 +#define mmLB1_PRIORITY_A_CNT 0x1DC6 +#define mmLB2_PRIORITY_A_CNT 0x40C6 +#define mmLB3_PRIORITY_A_CNT 0x43C6 +#define mmLB4_PRIORITY_A_CNT 0x46C6 +#define mmLB5_PRIORITY_A_CNT 0x49C6 #define mmPRIORITY_B_CNT 0x1AC7 +#define mmLB0_PRIORITY_B_CNT 0x1AC7 +#define mmLB1_PRIORITY_B_CNT 0x1DC7 +#define mmLB2_PRIORITY_B_CNT 0x40C7 +#define mmLB3_PRIORITY_B_CNT 0x43C7 +#define mmLB4_PRIORITY_B_CNT 0x46C7 +#define mmLB5_PRIORITY_B_CNT 0x49C7 #define mmDPG_PIPE_ARBITRATION_CONTROL3 0x1B32 +#define mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL3 0x1B32 +#define mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL3 0x1E32 +#define mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL3 0x4132 +#define mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL3 0x4432 +#define mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL3 0x4732 +#define mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL3 0x4A32 #define mmINT_MASK 0x1AD0 +#define mmLB0_INT_MASK 0x1AD0 +#define mmLB1_INT_MASK 0x1DD0 +#define mmLB2_INT_MASK 0x40D0 +#define mmLB3_INT_MASK 0x43D0 +#define mmLB4_INT_MASK 0x46D0 +#define mmLB5_INT_MASK 0x49D0 #define mmVLINE_STATUS 0x1AEE +#define mmLB0_VLINE_STATUS 0x1AEE +#define mmLB1_VLINE_STATUS 0x1DEE +#define mmLB2_VLINE_STATUS 0x40EE +#define mmLB3_VLINE_STATUS 0x43EE +#define mmLB4_VLINE_STATUS 0x46EE +#define mmLB5_VLINE_STATUS 0x49EE #define mmVBLANK_STATUS 0x1AEF +#define mmLB0_VBLANK_STATUS 0x1AEF +#define mmLB1_VBLANK_STATUS 0x1DEF +#define mmLB2_VBLANK_STATUS 0x40EF +#define mmLB3_VBLANK_STATUS 0x43EF +#define mmLB4_VBLANK_STATUS 0x46EF +#define mmLB5_VBLANK_STATUS 0x49EF +#define mmSCL_HORZ_FILTER_INIT_RGB_LUMA 0x1B4C +#define mmSCL0_SCL_HORZ_FILTER_INIT_RGB_LUMA 0x1B4C +#define mmSCL1_SCL_HORZ_FILTER_INIT_RGB_LUMA 0x1E4C +#define mmSCL2_SCL_HORZ_FILTER_INIT_RGB_LUMA 0x414C +#define mmSCL3_SCL_HORZ_FILTER_INIT_RGB_LUMA 0x444C +#define mmSCL4_SCL_HORZ_FILTER_INIT_RGB_LUMA 0x474C +#define mmSCL5_SCL_HORZ_FILTER_INIT_RGB_LUMA 0x4A4C + +#define mmSCL_HORZ_FILTER_INIT_CHROMA 0x1B4D +#define mmSCL0_SCL_HORZ_FILTER_INIT_CHROMA 0x1B4D +#define mmSCL1_SCL_HORZ_FILTER_INIT_CHROMA 0x1E4D +#define mmSCL2_SCL_HORZ_FILTER_INIT_CHROMA 0x414D +#define mmSCL3_SCL_HORZ_FILTER_INIT_CHROMA 0x444D +#define mmSCL4_SCL_HORZ_FILTER_INIT_CHROMA 0x474D +#define mmSCL5_SCL_HORZ_FILTER_INIT_CHROMA 0x4A4D #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h index abe05bc8075234001ba02834f5426b3a21e9e4d0..41c4a46ce357262747628903b38125ad5eb9d486 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h @@ -2076,6 +2076,8 @@ #define CRTC_CONTROL__CRTC_START_POINT_CNTL__SHIFT 0x0000000c #define CRTC_CONTROL__CRTC_SYNC_RESET_SEL_MASK 0x00000010L #define 
CRTC_CONTROL__CRTC_SYNC_RESET_SEL__SHIFT 0x00000004 +#define CRTC_CONTROL__CRTC_PREFETCH_EN_MASK 0x10000000L +#define CRTC_CONTROL__CRTC_PREFETCH_EN__SHIFT 0x0000001c #define CRTC_COUNT_CONTROL__CRTC_HORZ_COUNT_BY2_EN_MASK 0x00000001L #define CRTC_COUNT_CONTROL__CRTC_HORZ_COUNT_BY2_EN__SHIFT 0x00000000 #define CRTC_COUNT_CONTROL__CRTC_HORZ_REPETITION_COUNT_MASK 0x0000001eL @@ -6364,6 +6366,8 @@ #define DPG_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT__SHIFT 0x00000000 #define DPG_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT_MASK 0xffff0000L #define DPG_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT__SHIFT 0x00000010 +#define DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK_MASK 0x00030000L +#define DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT 0x00000010 #define DPG_PIPE_DPM_CONTROL__DPM_ENABLE_MASK 0x00000001L #define DPG_PIPE_DPM_CONTROL__DPM_ENABLE__SHIFT 0x00000000 #define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE_MASK 0x00000010L @@ -6384,6 +6388,8 @@ #define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST__SHIFT 0x00000008 #define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x00000010L #define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x00000004 +#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x00003000L +#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x0000000c #define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK 0xffff0000L #define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK__SHIFT 0x00000010 #define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH_MASK 0x00000001L @@ -6406,6 +6412,8 @@ #define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH__SHIFT 0x00000008 #define DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK 0x00000001L #define DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE__SHIFT 0x00000000 +#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK_MASK 0x00003000L +#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK__SHIFT 0x0000000c #define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK 0xffff0000L #define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK__SHIFT 0x00000010 #define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR_MASK 0x00000010L @@ -7256,6 +7264,8 @@ #define GRPH_CONTROL__GRPH_FORMAT__SHIFT 0x00000008 #define GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT_MASK 0x000c0000L #define GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT 0x00000012 +#define GRPH_CONTROL__GRPH_ARRAY_MODE_MASK 0x00f00000L +#define GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT 0x00000014 #define GRPH_CONTROL__GRPH_NUM_BANKS_MASK 0x0000000cL #define GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT 0x00000002 #define GRPH_CONTROL__GRPH_PIPE_CONFIG_MASK 0x1f000000L @@ -9835,4 +9845,98 @@ #define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_WRITE_EN_MASK 0x00000100L #define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008 +// DATA_FORMAT +#define DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L +#define DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x00000000 +#define DATA_FORMAT__RESET_REQ_AT_EOL_MASK 0x00000010L +#define DATA_FORMAT__RESET_REQ_AT_EOL__SHIFT 0x00000004 +#define DATA_FORMAT__PREFETCH_MASK 0x00001000L +#define DATA_FORMAT__PREFETCH__SHIFT 0x0000000c +#define DATA_FORMAT__SOF_READ_PT_MASK 0x001f0000L +#define DATA_FORMAT__SOF_READ_PT__SHIFT 0x00000010 +#define DATA_FORMAT__REQUEST_MODE_MASK 0x03000000L 
+#define DATA_FORMAT__REQUEST_MODE__SHIFT 0x00000018 +#define DATA_FORMAT__ALLOW_REQ_MODE_1_2_MASK 0x10000000L +#define DATA_FORMAT__ALLOW_REQ_MODE_1_2__SHIFT 0x0000001c + + +// DC_LB_MEMORY_SPLIT +#define DC_LB_MEMORY_SPLIT__LB_NUM_PARTITIONS_MASK 0x000f0000L +#define DC_LB_MEMORY_SPLIT__LB_NUM_PARTITIONS__SHIFT 0x00000010 +#define DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG_MASK 0x00300000L +#define DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT 0x00000014 + +// DC_LB_MEM_SIZE +#define DC_LB_MEM_SIZE__DC_LB_MEM_SIZE_MASK 0x000007ffL +#define DC_LB_MEM_SIZE__DC_LB_MEM_SIZE__SHIFT 0x00000000 + +// SCL_TAP_CONTROL +#define SCL_TAP_CONTROL__SCL_V_NUM_OF_TAPS_MASK 0x00000007L +#define SCL_TAP_CONTROL__SCL_V_NUM_OF_TAPS__SHIFT 0x00000000 +#define SCL_TAP_CONTROL__SCL_H_NUM_OF_TAPS_MASK 0x00000f00L +#define SCL_TAP_CONTROL__SCL_H_NUM_OF_TAPS__SHIFT 0x00000008 + +// INT_MASK +#define INT_MASK__VBLANK_INT_MASK 0x00000001L +#define INT_MASK__VBLANK_INT__SHIFT 0x00000000 +#define INT_MASK__VLINE_INT_MASK 0x00000010L +#define INT_MASK__VLINE_INT__SHIFT 0x00000004 + +// PRIORITY_A_CNT +#define PRIORITY_A_CNT__PRIORITY_MARK_A_MASK 0x00007fffL +#define PRIORITY_A_CNT__PRIORITY_MARK_A__SHIFT 0x00000000 +#define PRIORITY_A_CNT__PRIORITY_A_OFF_MASK 0x00010000L +#define PRIORITY_A_CNT__PRIORITY_A_OFF__SHIFT 0x00000010 +#define PRIORITY_A_CNT__PRIORITY_A_ALWAYS_ON_MASK 0x00100000L +#define PRIORITY_A_CNT__PRIORITY_A_ALWAYS_ON__SHIFT 0x00000014 +#define PRIORITY_A_CNT__PRIORITY_A_FORCE_MASK_MASK 0x01000000L +#define PRIORITY_A_CNT__PRIORITY_A_FORCE_MASK__SHIFT 0x00000018 + +// PRIORITY_B_CNT +#define PRIORITY_B_CNT__PRIORITY_MARK_B_MASK 0x00007fffL +#define PRIORITY_B_CNT__PRIORITY_MARK_B__SHIFT 0x00000000 +#define PRIORITY_B_CNT__PRIORITY_B_OFF_MASK 0x00010000L +#define PRIORITY_B_CNT__PRIORITY_B_OFF__SHIFT 0x00000010 +#define PRIORITY_B_CNT__PRIORITY_B_ALWAYS_ON_MASK 0x00100000L +#define PRIORITY_B_CNT__PRIORITY_B_ALWAYS_ON__SHIFT 0x00000014 +#define PRIORITY_B_CNT__PRIORITY_B_FORCE_MASK_MASK 0x01000000L +#define PRIORITY_B_CNT__PRIORITY_B_FORCE_MASK__SHIFT 0x00000018 + +// VLINE_STATUS +#define VLINE_STATUS__VLINE_OCCURRED_MASK 0x00000001L +#define VLINE_STATUS__VLINE_OCCURRED__SHIFT 0x00000000 +#define VLINE_STATUS__VLINE_ACK_MASK 0x00000010L +#define VLINE_STATUS__VLINE_ACK__SHIFT 0x00000004 +#define VLINE_STATUS__VLINE_STAT_MASK 0x00001000L +#define VLINE_STATUS__VLINE_STAT__SHIFT 0x0000000c +#define VLINE_STATUS__VLINE_INTERRUPT_MASK 0x00010000L +#define VLINE_STATUS__VLINE_INTERRUPT__SHIFT 0x00000010 +#define VLINE_STATUS__VLINE_INTERRUPT_TYPE_MASK 0x00020000L +#define VLINE_STATUS__VLINE_INTERRUPT_TYPE__SHIFT 0x00000011 + +// VBLANK_STATUS +#define VBLANK_STATUS__VBLANK_OCCURRED_MASK 0x00000001L +#define VBLANK_STATUS__VBLANK_OCCURRED__SHIFT 0x00000000 +#define VBLANK_STATUS__VBLANK_ACK_MASK 0x00000010L +#define VBLANK_STATUS__VBLANK_ACK__SHIFT 0x00000004 +#define VBLANK_STATUS__VBLANK_STAT_MASK 0x00001000L +#define VBLANK_STATUS__VBLANK_STAT__SHIFT 0x0000000c +#define VBLANK_STATUS__VBLANK_INTERRUPT_MASK 0x00010000L +#define VBLANK_STATUS__VBLANK_INTERRUPT__SHIFT 0x00000010 +#define VBLANK_STATUS__VBLANK_INTERRUPT_TYPE_MASK 0x00020000L +#define VBLANK_STATUS__VBLANK_INTERRUPT_TYPE__SHIFT 0x00000011 + +// SCL_HORZ_FILTER_INIT_RGB_LUMA +#define SCL_HORZ_FILTER_INIT_RGB_LUMA__SCL_H_INIT_FRAC_RGB_Y_MASK 0x0000ffffL +#define SCL_HORZ_FILTER_INIT_RGB_LUMA__SCL_H_INIT_FRAC_RGB_Y__SHIFT 0x00000000 +#define SCL_HORZ_FILTER_INIT_RGB_LUMA__SCL_H_INIT_INT_RGB_Y_MASK 0x000f0000L +#define 
SCL_HORZ_FILTER_INIT_RGB_LUMA__SCL_H_INIT_INT_RGB_Y__SHIFT 0x00000010 + +// SCL_HORZ_FILTER_INIT_CHROMA +#define SCL_HORZ_FILTER_INIT_CHROMA__SCL_H_INIT_FRAC_CBCR_MASK 0x0000ffffL +#define SCL_HORZ_FILTER_INIT_CHROMA__SCL_H_INIT_FRAC_CBCR__SHIFT 0x00000000 +#define SCL_HORZ_FILTER_INIT_CHROMA__SCL_H_INIT_INT_CBCR_MASK 0x00070000L +#define SCL_HORZ_FILTER_INIT_CHROMA__SCL_H_INIT_INT_CBCR__SHIFT 0x00000010 + + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h old mode 100755 new mode 100644 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h old mode 100755 new mode 100644 index 0e0319e98c07a7fc0bcaa6db96304182463c1793..ea683f452bb3324bb21bc0255284d82818e5d237 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h @@ -50271,6 +50271,10 @@ #define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L #define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L #define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L +//DSC_TOP0_DSC_DEBUG_CONTROL +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L + // addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec //DSCCIF0_DSCCIF_CONFIG0 @@ -50789,6 +50793,9 @@ #define DSC_TOP1_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L #define DSC_TOP1_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L #define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L +//DSC_TOP1_DSC_DEBUG_CONTROL +#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L // addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec @@ -51308,6 +51315,10 @@ #define DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L #define DSC_TOP2_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L #define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L +//DSC_TOP2_DSC_DEBUG_CONTROL +#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L + // addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec //DSCCIF2_DSCCIF_CONFIG0 @@ -51826,6 +51837,9 @@ #define DSC_TOP3_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L #define DSC_TOP3_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L #define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L +//DSC_TOP3_DSC_DEBUG_CONTROL +#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L // addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec @@ -52346,6 +52360,10 @@ #define DSC_TOP4_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L #define DSC_TOP4_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L #define DSC_TOP4_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L +//DSC_TOP4_DSC_DEBUG_CONTROL +#define DSC_TOP4_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP4_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L + // addressBlock: dce_dc_dsc4_dispdec_dsccif_dispdec //DSCCIF4_DSCCIF_CONFIG0 @@ -52864,6 +52882,10 @@ #define DSC_TOP5_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L #define DSC_TOP5_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L #define DSC_TOP5_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L +//DSC_TOP5_DSC_DEBUG_CONTROL +#define 
DSC_TOP5_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP5_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L + // addressBlock: dce_dc_dsc5_dispdec_dsccif_dispdec //DSCCIF5_DSCCIF_CONFIG0 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_offset.h old mode 100755 new mode 100644 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_sh_mask.h old mode 100755 new mode 100644 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h index 05d1b0a5f6d25f4259cd3f0d8366f16e2939ea44..cbaad7d83194383a6bdeb246cf5f8ea88e37d95a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h @@ -9180,6 +9180,8 @@ #define mmRLC_GPM_THREAD_ENABLE_BASE_IDX 1 #define mmRLC_RLCG_DOORBELL_RANGE 0x4c47 #define mmRLC_RLCG_DOORBELL_RANGE_BASE_IDX 1 +#define mmRLC_CGTT_MGCG_OVERRIDE 0x4c48 +#define mmRLC_CGTT_MGCG_OVERRIDE_BASE_IDX 1 #define mmRLC_CGCG_CGLS_CTRL 0x4c49 #define mmRLC_CGCG_CGLS_CTRL_BASE_IDX 1 #define mmRLC_CGCG_RAMP_CTRL 0x4c4a diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h index aac57f714cf1650b85125223a6d5f643f6ebf818..c2d035ef3e94e472ecd6d6e6526b11e380e5589c 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h @@ -32315,6 +32315,31 @@ #define RLC_RLCG_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L #define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L #define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L +//RLC_CGTT_MGCG_OVERRIDE +#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_0__SHIFT 0x0 +#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1 +#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE__SHIFT 0x2 +#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE__SHIFT 0x3 +#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE__SHIFT 0x4 +#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE__SHIFT 0x5 +#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE__SHIFT 0x6 +#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE__SHIFT 0x7 +#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE__SHIFT 0x8 +#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_15_9__SHIFT 0x9 +#define RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY__SHIFT 0x10 +#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_17__SHIFT 0x11 +#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_0_MASK 0x00000001L +#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK 0x00000002L +#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK 0x00000004L +#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK 0x00000008L +#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK 0x00000010L +#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK 0x00000020L +#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK 0x00000040L +#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK 0x00000080L +#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK 0x00000100L +#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_15_9_MASK 0x0000FE00L +#define RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK 0x00010000L +#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_17_MASK 0xFFFE0000L //RLC_RLCG_DOORBELL_STAT #define RLC_RLCG_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0 #define 
RLC_RLCG_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1 diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_offset.h new file mode 100644 index 0000000000000000000000000000000000000000..3685766c4d566b89d936f3cb4bdd4b55e0a54b42 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_offset.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _umc_8_7_0_OFFSET_HEADER +#define _umc_8_7_0_OFFSET_HEADER + +#define mmUMCCH0_0_GeccErrCntSel 0x0328 +#define mmUMCCH0_0_GeccErrCntSel_BASE_IDX 0 +#define mmUMCCH0_0_GeccErrCnt 0x0329 +#define mmUMCCH0_0_GeccErrCnt_BASE_IDX 0 +#define mmMCA_UMC_UMC0_MCUMC_STATUST0 0x03c2 +#define mmMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX 0 +#define mmMCA_UMC_UMC0_MCUMC_ADDRT0 0x03c4 +#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_sh_mask.h new file mode 100644 index 0000000000000000000000000000000000000000..4c5097fa0c0986f3fe196d9f871dec0f9bf9a541 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_sh_mask.h @@ -0,0 +1,79 @@ +#ifndef _umc_8_7_0_SH_MASK_HEADER +#define _umc_8_7_0_SH_MASK_HEADER + +//UMCCH0_0_GeccErrCntSel +#define UMCCH0_0_GeccErrCntSel__GeccErrCntCsSel__SHIFT 0x0 +#define UMCCH0_0_GeccErrCntSel__GeccErrInt__SHIFT 0xc +#define UMCCH0_0_GeccErrCntSel__GeccErrCntEn__SHIFT 0xf +#define UMCCH0_0_GeccErrCntSel__PoisonCntEn__SHIFT 0x10 +#define UMCCH0_0_GeccErrCntSel__GeccErrCntCsSel_MASK 0x0000000FL +#define UMCCH0_0_GeccErrCntSel__GeccErrInt_MASK 0x00003000L +#define UMCCH0_0_GeccErrCntSel__GeccErrCntEn_MASK 0x00008000L +#define UMCCH0_0_GeccErrCntSel__PoisonCntEn_MASK 0x00030000L +//UMCCH0_0_GeccErrCnt +#define UMCCH0_0_GeccErrCnt__GeccErrCnt__SHIFT 0x0 +#define UMCCH0_0_GeccErrCnt__GeccUnCorrErrCnt__SHIFT 0x10 +#define UMCCH0_0_GeccErrCnt__GeccErrCnt_MASK 0x0000FFFFL +#define UMCCH0_0_GeccErrCnt__GeccUnCorrErrCnt_MASK 0xFFFF0000L +//MCA_UMC_UMC0_MCUMC_STATUST0 +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCode__SHIFT 0x0 +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCodeExt__SHIFT 0x10 +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV22__SHIFT 0x16 +#define MCA_UMC_UMC0_MCUMC_STATUST0__AddrLsb__SHIFT 0x18 +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV30__SHIFT 0x1e +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreId__SHIFT 0x20 +#define 
MCA_UMC_UMC0_MCUMC_STATUST0__RESERV38__SHIFT 0x26 +#define MCA_UMC_UMC0_MCUMC_STATUST0__Scrub__SHIFT 0x28 +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV41__SHIFT 0x29 +#define MCA_UMC_UMC0_MCUMC_STATUST0__Poison__SHIFT 0x2b +#define MCA_UMC_UMC0_MCUMC_STATUST0__Deferred__SHIFT 0x2c +#define MCA_UMC_UMC0_MCUMC_STATUST0__UECC__SHIFT 0x2d +#define MCA_UMC_UMC0_MCUMC_STATUST0__CECC__SHIFT 0x2e +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV47__SHIFT 0x2f +#define MCA_UMC_UMC0_MCUMC_STATUST0__Transparent__SHIFT 0x34 +#define MCA_UMC_UMC0_MCUMC_STATUST0__SyndV__SHIFT 0x35 +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV54__SHIFT 0x36 +#define MCA_UMC_UMC0_MCUMC_STATUST0__TCC__SHIFT 0x37 +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreIdVal__SHIFT 0x38 +#define MCA_UMC_UMC0_MCUMC_STATUST0__PCC__SHIFT 0x39 +#define MCA_UMC_UMC0_MCUMC_STATUST0__AddrV__SHIFT 0x3a +#define MCA_UMC_UMC0_MCUMC_STATUST0__MiscV__SHIFT 0x3b +#define MCA_UMC_UMC0_MCUMC_STATUST0__En__SHIFT 0x3c +#define MCA_UMC_UMC0_MCUMC_STATUST0__UC__SHIFT 0x3d +#define MCA_UMC_UMC0_MCUMC_STATUST0__Overflow__SHIFT 0x3e +#define MCA_UMC_UMC0_MCUMC_STATUST0__Val__SHIFT 0x3f +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCode_MASK 0x000000000000FFFFL +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCodeExt_MASK 0x00000000003F0000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV22_MASK 0x0000000000C00000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__AddrLsb_MASK 0x000000003F000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV30_MASK 0x00000000C0000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreId_MASK 0x0000003F00000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV38_MASK 0x000000C000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__Scrub_MASK 0x0000010000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV41_MASK 0x0000060000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__Poison_MASK 0x0000080000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__Deferred_MASK 0x0000100000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__UECC_MASK 0x0000200000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__CECC_MASK 0x0000400000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV47_MASK 0x000F800000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__Transparent_MASK 0x0010000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__SyndV_MASK 0x0020000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV54_MASK 0x0040000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__TCC_MASK 0x0080000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreIdVal_MASK 0x0100000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__PCC_MASK 0x0200000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__AddrV_MASK 0x0400000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__MiscV_MASK 0x0800000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__En_MASK 0x1000000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__UC_MASK 0x2000000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__Overflow_MASK 0x4000000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__Val_MASK 0x8000000000000000L +//MCA_UMC_UMC0_MCUMC_ADDRT0 +#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0 +#define MCA_UMC_UMC0_MCUMC_ADDRT0__LSB__SHIFT 0x38 +#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x3e +#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL +#define MCA_UMC_UMC0_MCUMC_ADDRT0__LSB_MASK 0x3F00000000000000L +#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved_MASK 0xC000000000000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 
a3c238c39ef57233d76465419da5876ba6e886db..fc592f60e6a04916302c3f8e0eb20a863a7ce7e9 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -212,10 +212,6 @@ struct tile_config {
  * IH ring entry. This function allows the KFD ISR to get the VMID
  * from the fault status register as early as possible.
  *
- * @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled
- *
- * @get_unique_id: Returns uuid id of current device
- *
  * This structure contains function pointers to services that the kgd driver
  * provides to amdkfd driver.
  *
@@ -290,9 +286,6 @@ struct kfd2kgd_calls {
 	void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
 			uint32_t vmid, uint64_t page_table_base);
 	uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
-	uint64_t (*get_hive_id)(struct kgd_dev *kgd);
-	uint64_t (*get_unique_id)(struct kgd_dev *kgd);
-
 };
 
 #endif	/* KGD_KFD_INTERFACE_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index a7f92d0b3a90b9e840ad931c56fa92b8a6351b1d..0aec28fda0587ea7d8796726aad4e9bfed682f58 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -322,6 +322,115 @@ struct amd_pm_funcs {
 	int (*asic_reset_mode_2)(void *handle);
 	int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
 	int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
+	ssize_t (*get_gpu_metrics)(void *handle, void **table);
+};
+
+struct metrics_table_header {
+	uint16_t		structure_size;
+	uint8_t			format_revision;
+	uint8_t			content_revision;
+};
+
+struct gpu_metrics_v1_0 {
+	struct metrics_table_header	common_header;
+
+	/* Driver attached timestamp (in ns) */
+	uint64_t		system_clock_counter;
+
+	/* Temperature */
+	uint16_t		temperature_edge;
+	uint16_t		temperature_hotspot;
+	uint16_t		temperature_mem;
+	uint16_t		temperature_vrgfx;
+	uint16_t		temperature_vrsoc;
+	uint16_t		temperature_vrmem;
+
+	/* Utilization */
+	uint16_t		average_gfx_activity;
+	uint16_t		average_umc_activity; // memory controller
+	uint16_t		average_mm_activity; // UVD or VCN
+
+	/* Power/Energy */
+	uint16_t		average_socket_power;
+	uint32_t		energy_accumulator;
+
+	/* Average clocks */
+	uint16_t		average_gfxclk_frequency;
+	uint16_t		average_socclk_frequency;
+	uint16_t		average_uclk_frequency;
+	uint16_t		average_vclk0_frequency;
+	uint16_t		average_dclk0_frequency;
+	uint16_t		average_vclk1_frequency;
+	uint16_t		average_dclk1_frequency;
+
+	/* Current clocks */
+	uint16_t		current_gfxclk;
+	uint16_t		current_socclk;
+	uint16_t		current_uclk;
+	uint16_t		current_vclk0;
+	uint16_t		current_dclk0;
+	uint16_t		current_vclk1;
+	uint16_t		current_dclk1;
+
+	/* Throttle status */
+	uint32_t		throttle_status;
+
+	/* Fans */
+	uint16_t		current_fan_speed;
+
+	/* Link width/speed */
+	uint8_t			pcie_link_width;
+	uint8_t			pcie_link_speed; // in 0.1 GT/s
+};
+
+struct gpu_metrics_v2_0 {
+	struct metrics_table_header	common_header;
+
+	/* Driver attached timestamp (in ns) */
+	uint64_t		system_clock_counter;
+
+	/* Temperature */
+	uint16_t		temperature_gfx; // gfx temperature on APUs
+	uint16_t		temperature_soc; // soc temperature on APUs
+	uint16_t		temperature_core[8]; // CPU core temperature on APUs
+	uint16_t		temperature_l3[2];
+
+	/* Utilization */
+	uint16_t		average_gfx_activity;
+	uint16_t		average_mm_activity; // UVD or VCN
+
+	/* Power/Energy */
+	uint16_t		average_socket_power; // combined dGPU + APU power on an A+A (AMD APU plus AMD dGPU) platform
+	uint16_t		average_cpu_power;
+	uint16_t		average_soc_power;
+	uint16_t		average_gfx_power;
+	uint16_t		average_core_power[8]; // CPU core power on APUs
+
+	/* Average clocks */
+	uint16_t		average_gfxclk_frequency;
+	uint16_t		average_socclk_frequency;
+	uint16_t		average_uclk_frequency;
+	uint16_t		average_fclk_frequency;
+	uint16_t		average_vclk_frequency;
+	uint16_t		average_dclk_frequency;
+
+	/* Current clocks */
+	uint16_t		current_gfxclk;
+	uint16_t		current_socclk;
+	uint16_t		current_uclk;
+	uint16_t		current_fclk;
+	uint16_t		current_vclk;
+	uint16_t		current_dclk;
+	uint16_t		current_coreclk[8]; // CPU core clocks
+	uint16_t		current_l3clk[2];
+
+	/* Throttle status */
+	uint32_t		throttle_status;
+
+	/* Fans */
+	uint16_t		fan_pwm;
+
+	uint16_t		padding;
 };
 
 #endif
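A consumer of get_gpu_metrics() is expected to dispatch on the common header before casting to a versioned layout. A minimal sketch (the helper is illustrative, not part of this patch; it assumes the structures above and <linux/errno.h>):

static int gpu_metrics_read_temp(const void *table, uint16_t *temp)
{
	const struct metrics_table_header *hdr = table;

	if (hdr->format_revision == 1) {
		/* v1: dGPU layout */
		*temp = ((const struct gpu_metrics_v1_0 *)table)->temperature_edge;
		return 0;
	}
	if (hdr->format_revision == 2) {
		/* v2: APU layout has no edge sensor; report gfx temperature */
		*temp = ((const struct gpu_metrics_v2_0 *)table)->temperature_gfx;
		return 0;
	}
	return -EINVAL;	/* unknown revision; structure_size bounds any copy */
}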
diff --git a/drivers/gpu/drm/amd/pm/Makefile b/drivers/gpu/drm/amd/pm/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..f01e86030cd1607b4406234e49711f2136148eaa
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/Makefile
@@ -0,0 +1,46 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+
+subdir-ccflags-y += \
+		-I$(FULL_AMD_PATH)/pm/inc/  \
+		-I$(FULL_AMD_PATH)/include/asic_reg  \
+		-I$(FULL_AMD_PATH)/include  \
+		-I$(FULL_AMD_PATH)/pm/swsmu \
+		-I$(FULL_AMD_PATH)/pm/swsmu/smu11 \
+		-I$(FULL_AMD_PATH)/pm/swsmu/smu12 \
+		-I$(FULL_AMD_PATH)/pm/powerplay \
+		-I$(FULL_AMD_PATH)/pm/powerplay/smumgr\
+		-I$(FULL_AMD_PATH)/pm/powerplay/hwmgr
+
+AMD_PM_PATH = ../pm
+
+PM_LIBS = swsmu powerplay
+
+AMD_PM = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/,$(PM_LIBS)))
+
+include $(AMD_PM)
+
+PM_MGR = amdgpu_dpm.o amdgpu_pm.o
+
+AMD_PM_POWER = $(addprefix $(AMD_PM_PATH)/,$(PM_MGR))
+
+AMD_POWERPLAY_FILES += $(AMD_PM_POWER)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
similarity index 76%
rename from drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
rename to drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 2082c0acd216d015a9e0c174490d1bd6d140e7e7..17a45baff638c651649c983b66ada6cf575c44ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -28,6 +28,11 @@
 #include "amdgpu_dpm.h"
 #include "atom.h"
 #include "amd_pcie.h"
+#include "amdgpu_display.h"
+#include "hwmgr.h"
+#include <linux/power_supply.h>
+
+#define WIDTH_4K 3840
 
 void amdgpu_dpm_print_class_info(u32 class, u32 class2)
 {
@@ -117,7 +122,7 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 
 void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
 {
-	struct drm_device *ddev = adev->ddev;
+	struct drm_device *ddev = adev_to_drm(adev);
 	struct drm_crtc *crtc;
 	struct amdgpu_crtc *amdgpu_crtc;
 
@@ -138,7 +143,7 @@ void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
 
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_crtc *crtc;
 	struct amdgpu_crtc *amdgpu_crtc;
 	u32 vblank_in_pixels;
@@ -165,7 +170,7 @@ u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
 
 u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
 {
-	struct drm_device *dev = adev->ddev;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_crtc *crtc;
 	struct amdgpu_crtc *amdgpu_crtc;
 	u32 vrefresh = 0;
@@ -1110,8 +1115,6 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
 	struct smu_context *smu = &adev->smu;
 	int ret = 0;
 
-	dev_info(adev->dev, "GPU BACO reset\n");
-
 	if (is_support_sw_smu(adev)) {
 		ret = smu_baco_enter(smu);
 		if (ret)
@@ -1216,3 +1219,469 @@ int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
 
 	return 0;
 }
+
+int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
+{
+	void *pp_handle = adev->powerplay.pp_handle;
+	const struct amd_pm_funcs *pp_funcs =
+			adev->powerplay.pp_funcs;
+	struct smu_context *smu = &adev->smu;
+	int ret = 0;
+
+	if (is_support_sw_smu(adev))
+		ret = smu_enable_mgpu_fan_boost(smu);
+	else if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
+		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
+
+	return ret;
+}
+
+int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
+				      uint32_t msg_id)
+{
+	void *pp_handle = adev->powerplay.pp_handle;
+	const struct amd_pm_funcs *pp_funcs =
+			adev->powerplay.pp_funcs;
+	int ret = 0;
+
+	if (pp_funcs && pp_funcs->set_clockgating_by_smu)
+		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
+						       msg_id);
+
+	return ret;
+}
+
+int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
+				  bool acquire)
+{
+	void *pp_handle = adev->powerplay.pp_handle;
+	const struct amd_pm_funcs *pp_funcs =
+			adev->powerplay.pp_funcs;
+	int ret = -EOPNOTSUPP;
+
+	if (pp_funcs &&
pp_funcs->smu_i2c_bus_access) + ret = pp_funcs->smu_i2c_bus_access(pp_handle, + acquire); + + return ret; +} + +void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) +{ + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + if (power_supply_is_system_supplied() > 0) + adev->pm.ac_power = true; + else + adev->pm.ac_power = false; + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->enable_bapm) + amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); + mutex_unlock(&adev->pm.mutex); + + if (is_support_sw_smu(adev)) + smu_set_ac_dc(&adev->smu); + } +} + +int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, + void *data, uint32_t *size) +{ + int ret = 0; + + if (!data || !size) + return -EINVAL; + + if (is_support_sw_smu(adev)) + ret = smu_read_sensor(&adev->smu, sensor, data, size); + else { + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) + ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, + sensor, data, size); + else + ret = -EINVAL; + } + + return ret; +} + +void amdgpu_dpm_thermal_work_handler(struct work_struct *work) +{ + struct amdgpu_device *adev = + container_of(work, struct amdgpu_device, + pm.dpm.thermal.work); + /* switch to the thermal state */ + enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; + int temp, size = sizeof(temp); + + if (!adev->pm.dpm_enabled) + return; + + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, + (void *)&temp, &size)) { + if (temp < adev->pm.dpm.thermal.min_temp) + /* switch back the user state */ + dpm_state = adev->pm.dpm.user_state; + } else { + if (adev->pm.dpm.thermal.high_to_low) + /* switch back the user state */ + dpm_state = adev->pm.dpm.user_state; + } + mutex_lock(&adev->pm.mutex); + if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) + adev->pm.dpm.thermal_active = true; + else + adev->pm.dpm.thermal_active = false; + adev->pm.dpm.state = dpm_state; + mutex_unlock(&adev->pm.mutex); + + amdgpu_pm_compute_clocks(adev); +} + +static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, + enum amd_pm_state_type dpm_state) +{ + int i; + struct amdgpu_ps *ps; + u32 ui_class; + bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? 
+			true : false;
+
+	/* check if the vblank period is too short to adjust the mclk */
+	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
+		if (amdgpu_dpm_vblank_too_short(adev))
+			single_display = false;
+	}
+
+	/* certain older asics have a separate 3D performance state,
+	 * so try that first if the user selected performance
+	 */
+	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
+		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
+	/* balanced states don't exist at the moment */
+	if (dpm_state == POWER_STATE_TYPE_BALANCED)
+		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+
+restart_search:
+	/* Pick the best power state based on current conditions */
+	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
+		ps = &adev->pm.dpm.ps[i];
+		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
+		switch (dpm_state) {
+		/* user states */
+		case POWER_STATE_TYPE_BATTERY:
+			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
+				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+					if (single_display)
+						return ps;
+				} else
+					return ps;
+			}
+			break;
+		case POWER_STATE_TYPE_BALANCED:
+			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
+				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+					if (single_display)
+						return ps;
+				} else
+					return ps;
+			}
+			break;
+		case POWER_STATE_TYPE_PERFORMANCE:
+			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+					if (single_display)
+						return ps;
+				} else
+					return ps;
+			}
+			break;
+		/* internal states */
+		case POWER_STATE_TYPE_INTERNAL_UVD:
+			if (adev->pm.dpm.uvd_ps)
+				return adev->pm.dpm.uvd_ps;
+			else
+				break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_BOOT:
+			return adev->pm.dpm.boot_ps;
+		case POWER_STATE_TYPE_INTERNAL_THERMAL:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_ACPI:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_ULV:
+			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_3DPERF:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+				return ps;
+			break;
+		default:
+			break;
+		}
+	}
+	/* use a fallback state if we didn't match */
+	switch (dpm_state) {
+	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+		goto restart_search;
+	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+		if (adev->pm.dpm.uvd_ps) {
+			return adev->pm.dpm.uvd_ps;
+		} else {
+			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+			goto restart_search;
+		}
+	case POWER_STATE_TYPE_INTERNAL_THERMAL:
+		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
+		goto restart_search;
+	case POWER_STATE_TYPE_INTERNAL_ACPI:
+		dpm_state = POWER_STATE_TYPE_BATTERY;
+		goto restart_search;
+	case POWER_STATE_TYPE_BATTERY:
+	case POWER_STATE_TYPE_BALANCED:
+	case POWER_STATE_TYPE_INTERNAL_3DPERF:
+		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+		goto restart_search;
+	default:
+		break;
+	}
+
+	return NULL;
+}
+
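The fallback switch above degrades an unmatched request step by step: UVD_SD falls back to UVD_HD; the UVD HD/HD2/MVC states fall back to the dedicated UVD state or to PERFORMANCE; THERMAL falls back to ACPI, ACPI to BATTERY, and BATTERY/BALANCED/3DPERF to PERFORMANCE. If even the final PERFORMANCE pass matches nothing, the function returns NULL.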
+static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
+{
+	struct amdgpu_ps *ps;
+	enum amd_pm_state_type dpm_state;
+	int ret;
+	bool equal = false;
+
+	/* if dpm init failed */
+	if (!adev->pm.dpm_enabled)
+		return;
+
+	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
+		/* add other state override checks here */
+		if ((!adev->pm.dpm.thermal_active) &&
+		    (!adev->pm.dpm.uvd_active))
+			adev->pm.dpm.state = adev->pm.dpm.user_state;
+	}
+	dpm_state = adev->pm.dpm.state;
+
+	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
+	if (ps)
+		adev->pm.dpm.requested_ps = ps;
+	else
+		return;
+
+	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
+		printk("switching from power state:\n");
+		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
+		printk("switching to power state:\n");
+		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
+	}
+
+	/* update whether vce is active */
+	ps->vce_active = adev->pm.dpm.vce_active;
+	if (adev->powerplay.pp_funcs->display_configuration_changed)
+		amdgpu_dpm_display_configuration_changed(adev);
+
+	ret = amdgpu_dpm_pre_set_power_state(adev);
+	if (ret)
+		return;
+
+	if (adev->powerplay.pp_funcs->check_state_equal) {
+		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+			equal = false;
+	}
+
+	if (equal)
+		return;
+
+	amdgpu_dpm_set_power_state(adev);
+	amdgpu_dpm_post_set_power_state(adev);
+
+	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
+	if (adev->powerplay.pp_funcs->force_performance_level) {
+		if (adev->pm.dpm.thermal_active) {
+			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
+			/* force low perf level for thermal */
+			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
+			/* save the user's level */
+			adev->pm.dpm.forced_level = level;
+		} else {
+			/* otherwise, user selected level */
+			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
+		}
+	}
+}
+
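amdgpu_pm_compute_clocks() below fans out to one of three backends: swsmu (smu_handle_task), powerplay (dispatch_tasks, with display-configuration bookkeeping on the non-DC path), or the legacy dpm path, which re-picks a power state directly under pm.mutex.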
+void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+{
+	int i = 0;
+
+	if (!adev->pm.dpm_enabled)
+		return;
+
+	if (adev->mode_info.num_crtc)
+		amdgpu_display_bandwidth_update(adev);
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (ring && ring->sched.ready)
+			amdgpu_fence_wait_empty(ring);
+	}
+
+	if (is_support_sw_smu(adev)) {
+		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
+		smu_handle_task(&adev->smu,
+				smu_dpm->dpm_level,
+				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+				true);
+	} else {
+		if (adev->powerplay.pp_funcs->dispatch_tasks) {
+			if (!amdgpu_device_has_dc_support(adev)) {
+				mutex_lock(&adev->pm.mutex);
+				amdgpu_dpm_get_active_displays(adev);
+				adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+				adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+				adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+				/* we have issues with mclk switching with
+				 * refresh rates over 120 Hz in the non-DC
+				 * code path.
+				 */
+				if (adev->pm.pm_display_cfg.vrefresh > 120)
+					adev->pm.pm_display_cfg.min_vblank_time = 0;
+				if (adev->powerplay.pp_funcs->display_configuration_change)
+					adev->powerplay.pp_funcs->display_configuration_change(
+								adev->powerplay.pp_handle,
+								&adev->pm.pm_display_cfg);
+				mutex_unlock(&adev->pm.mutex);
+			}
+			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
+		} else {
+			mutex_lock(&adev->pm.mutex);
+			amdgpu_dpm_get_active_displays(adev);
+			amdgpu_dpm_change_power_state_locked(adev);
+			mutex_unlock(&adev->pm.mutex);
+		}
+	}
+}
+
+void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
+{
+	int ret = 0;
+
+	if (adev->family == AMDGPU_FAMILY_SI) {
+		mutex_lock(&adev->pm.mutex);
+		if (enable) {
+			adev->pm.dpm.uvd_active = true;
+			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+		} else {
+			adev->pm.dpm.uvd_active = false;
+		}
+		mutex_unlock(&adev->pm.mutex);
+
+		amdgpu_pm_compute_clocks(adev);
+	} else {
+		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+		if (ret)
+			DRM_ERROR("DPM %s UVD failed, ret = %d.\n",
+				  enable ? "enable" : "disable", ret);
+
+		/* enable/disable Low Memory PState for UVD (4k videos) */
+		if (adev->asic_type == CHIP_STONEY &&
+		    adev->uvd.decode_image_width >= WIDTH_4K) {
+			struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+			if (hwmgr && hwmgr->hwmgr_func &&
+			    hwmgr->hwmgr_func->update_nbdpm_pstate)
+				hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+								       !enable,
+								       true);
+		}
+	}
+}
+
+void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
+{
+	int ret = 0;
+
+	if (adev->family == AMDGPU_FAMILY_SI) {
+		mutex_lock(&adev->pm.mutex);
+		if (enable) {
+			adev->pm.dpm.vce_active = true;
+			/* XXX select vce level based on ring/task */
+			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+		} else {
+			adev->pm.dpm.vce_active = false;
+		}
+		mutex_unlock(&adev->pm.mutex);
+
+		amdgpu_pm_compute_clocks(adev);
+	} else {
+		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+		if (ret)
+			DRM_ERROR("DPM %s VCE failed, ret = %d.\n",
+				  enable ? "enable" : "disable", ret);
+	}
+}
+
+void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
+{
+	int i;
+
+	if (adev->powerplay.pp_funcs->print_power_state == NULL)
+		return;
+
+	for (i = 0; i < adev->pm.dpm.num_ps; i++)
+		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
+
+}
+
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
+{
+	int ret = 0;
+
+	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+	if (ret)
+		DRM_ERROR("DPM %s JPEG failed, ret = %d.\n",
+			  enable ?
"enable" : "disable", ret); +} + +int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) +{ + int r; + + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { + r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle); + if (r) { + pr_err("smu firmware loading failed\n"); + return r; + } + *smu_version = adev->pm.fw_version; + } + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c similarity index 81% rename from drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c rename to drivers/gpu/drm/amd/pm/amdgpu_pm.c index e4dbf14320b6180a884c7f47c11892e9508b19b3..2d924e88a215b960c689e7e22680b5f0e3509829 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -29,17 +29,14 @@ #include "amdgpu_drv.h" #include "amdgpu_pm.h" #include "amdgpu_dpm.h" -#include "amdgpu_display.h" #include "amdgpu_smu.h" #include "atom.h" -#include #include #include #include #include #include #include "hwmgr.h" -#define WIDTH_4K 3840 static const struct cg_flag_name clocks[] = { {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, @@ -81,45 +78,6 @@ static const struct hwmon_temp_label { {PP_TEMP_MEM, "mem"}, }; -void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) -{ - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - if (power_supply_is_system_supplied() > 0) - adev->pm.ac_power = true; - else - adev->pm.ac_power = false; - if (adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->enable_bapm) - amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); - mutex_unlock(&adev->pm.mutex); - - if (is_support_sw_smu(adev)) - smu_set_ac_dc(&adev->smu); - } -} - -int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, - void *data, uint32_t *size) -{ - int ret = 0; - - if (!data || !size) - return -EINVAL; - - if (is_support_sw_smu(adev)) - ret = smu_read_sensor(&adev->smu, sensor, data, size); - else { - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) - ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, - sensor, data, size); - else - ret = -EINVAL; - } - - return ret; -} - /** * DOC: power_dpm_state * @@ -159,11 +117,11 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); enum amd_pm_state_type pm; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -197,11 +155,11 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); enum amd_pm_state_type state; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; if (strncmp("battery", buf, strlen("battery")) == 0) @@ -303,11 +261,11 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); enum amd_dpm_forced_level level = 0xff; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -344,12 +302,12 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device 
*dev,
 					      size_t count)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);
 	enum amd_dpm_forced_level level;
 	enum amd_dpm_forced_level current_level = 0xff;
 	int ret = 0;
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EPERM;
 
 	if (strncmp("low", buf, strlen("low")) == 0) {
@@ -449,11 +407,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 		char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);
 	struct pp_states_info data;
 	int i, buf_len, ret;
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
@@ -491,13 +449,13 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 		char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);
 	struct pp_states_info data;
 	struct smu_context *smu = &adev->smu;
 	enum amd_pm_state_type pm = 0;
 	int i = 0, ret = 0;
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
@@ -536,9 +494,9 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
 		char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EPERM;
 
 	if (adev->pp_force_state_enabled)
@@ -553,12 +511,12 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 		size_t count)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);
 	enum amd_pm_state_type state = 0;
 	unsigned long idx;
 	int ret;
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EPERM;
 
 	if (strlen(buf) == 1)
@@ -614,11 +572,11 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 		char *buf)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);
 	char *table = NULL;
 	int size, ret;
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
@@ -659,10 +617,10 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 		size_t count)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct amdgpu_device *adev = ddev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(ddev);
 	int ret = 0;
 
-	if (adev->in_gpu_reset)
+	if (amdgpu_in_reset(adev))
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
@@ -694,6 +652,52 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
  * in each power level within a power state. The pp_od_clk_voltage is used for
  * this.
  *
+ * Note that the actual memory controller clock rate is exposed, not
+ * the effective memory clock of the DRAMs. To translate it, use the
To translate it, use the + * following formulas: + * + * Clock conversion (MHz): + * + * HBM: effective_memory_clock = memory_controller_clock * 1 + * + * G5: effective_memory_clock = memory_controller_clock * 1 + * + * G6: effective_memory_clock = memory_controller_clock * 2 + * + * DRAM data rate (MT/s): + * + * HBM: effective_memory_clock * 2 = data_rate + * + * G5: effective_memory_clock * 4 = data_rate + * + * G6: effective_memory_clock * 8 = data_rate + * + * Bandwidth (MB/s): + * + * data_rate * vram_bit_width / 8 = memory_bandwidth + * + * Some examples: + * + * G5 on RX460: + * + * memory_controller_clock = 1750 MHz + * + * effective_memory_clock = 1750 MHz * 1 = 1750 MHz + * + * data rate = 1750 * 4 = 7000 MT/s + * + * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s + * + * G6 on RX5700: + * + * memory_controller_clock = 875 MHz + * + * effective_memory_clock = 875 MHz * 2 = 1750 MHz + * + * data rate = 1750 * 8 = 14000 MT/s + * + * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s + * * < For Vega10 and previous ASICs > * * Reading the file will display: @@ -759,7 +763,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); int ret; uint32_t parameter_size = 0; long parameter[64]; @@ -769,7 +773,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, const char delimiter[3] = {' ', '\n', '\0'}; uint32_t type; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; if (count > 127) @@ -796,7 +800,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, tmp_str++; while (isspace(*++tmp_str)); - while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { + while (tmp_str[0]) { + sub_str = strsep(&tmp_str, delimiter); ret = kstrtol(sub_str, 0, &parameter[parameter_size]); if (ret) return -EINVAL; @@ -858,11 +863,11 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); ssize_t size; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -912,11 +917,11 @@ static ssize_t amdgpu_set_pp_features(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); uint64_t featuremask; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = kstrtou64(buf, 0, &featuremask); @@ -957,11 +962,11 @@ static ssize_t amdgpu_get_pp_features(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); ssize_t size; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -1018,11 +1023,11 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); ssize_t size; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -1066,7 +1071,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t
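The conversion factors in the comment above are easy to check mechanically. A standalone program reproducing both worked examples; every constant is taken from the text above, nothing else is assumed:

#include <stdio.h>

/* Recomputes the RX460 (GDDR5, 128-bit) and RX5700 (GDDR6, 256-bit)
 * examples from the pp_od_clk_voltage documentation above. */
static void vram_bw(const char *name, unsigned int mc_clk_mhz,
		    unsigned int clk_mult, unsigned int rate_mult,
		    unsigned int bus_width_bits)
{
	unsigned int eff_mhz = mc_clk_mhz * clk_mult;
	unsigned int data_rate = eff_mhz * rate_mult;		/* MT/s */
	unsigned int bw = data_rate * bus_width_bits / 8;	/* MB/s */

	printf("%s: %u MHz -> %u MT/s -> %u MB/s\n",
	       name, eff_mhz, data_rate, bw);
}

int main(void)
{
	vram_bw("G5 on RX460", 1750, 1, 4, 128);	/* 112000 MB/s */
	vram_bw("G6 on RX5700", 875, 2, 8, 256);	/* 448000 MB/s */
	return 0;
}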
*mask) memcpy(buf_cpy, buf, bytes); buf_cpy[bytes] = '\0'; tmp = buf_cpy; - while ((sub_str = strsep(&tmp, delimiter)) != NULL) { + while (tmp[0]) { + sub_str = strsep(&tmp, delimiter); if (strlen(sub_str)) { ret = kstrtol(sub_str, 0, &level); if (ret) @@ -1085,11 +1091,11 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); int ret; uint32_t mask = 0; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = amdgpu_read_mask(buf, count, &mask); @@ -1121,11 +1127,11 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); ssize_t size; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -1153,11 +1159,11 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); uint32_t mask = 0; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = amdgpu_read_mask(buf, count, &mask); @@ -1189,11 +1195,11 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); ssize_t size; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -1221,11 +1227,11 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); int ret; uint32_t mask = 0; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = amdgpu_read_mask(buf, count, &mask); @@ -1259,11 +1265,11 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); ssize_t size; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -1291,11 +1297,11 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); int ret; uint32_t mask = 0; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = amdgpu_read_mask(buf, count, &mask); @@ -1329,11 +1335,11 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); ssize_t size; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -1361,11 +1367,11 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); 
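The loop change in amdgpu_read_mask() above, and the identical ones in amdgpu_set_pp_od_clk_voltage() and amdgpu_set_pp_power_profile_mode(), all address the same strsep() property: when the buffer ends in a delimiter (sysfs writes usually end in '\n'), looping on the return value hands one final empty token to kstrtol(), which fails with -EINVAL. A userspace rendering of the two loop shapes; the `tmp &&` guard is extra caution for input with no trailing delimiter, while the kernel loops test tmp_str[0] directly:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char a[] = "300 600\n", b[] = "300 600\n";
	char *tmp, *tok;

	/* Old shape: the trailing '\n' yields a final empty token. */
	tmp = a;
	while ((tok = strsep(&tmp, " \n")) != NULL)
		printf("old: '%s'\n", tok);	/* '300', '600', '' */

	/* New shape: check for remaining text before splitting, so the
	 * empty tail never reaches the number parser. */
	tmp = b;
	while (tmp && tmp[0]) {
		tok = strsep(&tmp, " \n");
		printf("new: '%s'\n", tok);	/* '300', '600' */
	}
	return 0;
}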
int ret; uint32_t mask = 0; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = amdgpu_read_mask(buf, count, &mask); @@ -1399,11 +1405,11 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); ssize_t size; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -1431,11 +1437,11 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); int ret; uint32_t mask = 0; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = amdgpu_read_mask(buf, count, &mask); @@ -1469,11 +1475,11 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); uint32_t value = 0; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -1499,11 +1505,11 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); int ret; long int value; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = kstrtol(buf, 0, &value); @@ -1542,11 +1548,11 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); uint32_t value = 0; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -1572,11 +1578,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); int ret; long int value; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = kstrtol(buf, 0, &value); @@ -1635,11 +1641,11 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); ssize_t size; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; ret = pm_runtime_get_sync(ddev->dev); @@ -1669,7 +1675,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, { int ret; struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); uint32_t parameter_size = 0; long parameter[64]; char *sub_str, buf_cpy[128]; @@ -1679,7 +1685,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, long int profile_mode = 0; const char delimiter[3] = {' ', '\n', '\0'}; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; tmp[0] = *(buf); @@ -1695,7 +1701,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, i++; memcpy(buf_cpy, buf, count-i); tmp_str = buf_cpy; - while ((sub_str = strsep(&tmp_str, 
delimiter)) != NULL) { + while (tmp_str[0]) { + sub_str = strsep(&tmp_str, delimiter); ret = kstrtol(sub_str, 0, &parameter[parameter_size]); if (ret) return -EINVAL; @@ -1739,10 +1746,10 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); int r, value, size = sizeof(value); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; r = pm_runtime_get_sync(ddev->dev); @@ -1777,10 +1784,10 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); int r, value, size = sizeof(value); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; r = pm_runtime_get_sync(ddev->dev); @@ -1819,11 +1826,11 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); uint64_t count0 = 0, count1 = 0; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; if (adev->flags & AMD_IS_APU) @@ -1862,9 +1869,9 @@ static ssize_t amdgpu_get_unique_id(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; if (adev->unique_id) @@ -1893,10 +1900,10 @@ static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev, char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n", - adev->ddev->unique, + adev_to_drm(adev)->unique, atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled", adev->throttling_logging_rs.interval / HZ + 1); } @@ -1907,7 +1914,7 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev, size_t count) { struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; + struct amdgpu_device *adev = drm_to_adev(ddev); long throttling_logging_interval; unsigned long flags; int ret = 0; @@ -1940,6 +1947,57 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev, return count; } +/** + * DOC: gpu_metrics + * + * The amdgpu driver provides a sysfs API for retrieving current gpu + * metrics data. The file gpu_metrics is used for this. Reading the + * file will dump all the current gpu metrics data. + * + * The data includes temperature, frequency, engine utilization, + * power consumption, throttler status, fan speed and CPU core statistics + * (available for APUs only). That is, it gives a snapshot of all sensors + * at the same time.
+ */ +static ssize_t amdgpu_get_gpu_metrics(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + void *gpu_metrics; + ssize_t size = 0; + int ret; + + if (amdgpu_in_reset(adev)) + return -EPERM; + + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) { + pm_runtime_put_autosuspend(ddev->dev); + return ret; + } + + if (is_support_sw_smu(adev)) + size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics); + else if (adev->powerplay.pp_funcs->get_gpu_metrics) + size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics); + + if (size <= 0) + goto out; + + if (size >= PAGE_SIZE) + size = PAGE_SIZE - 1; + + memcpy(buf, gpu_metrics, size); + +out: + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + return size; +} + static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC), @@ -1963,6 +2021,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC), }; static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, @@ -2012,6 +2071,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } else if (DEVICE_ATTR_IS(pp_features)) { if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10) *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(gpu_metrics)) { + if (asic_type < CHIP_VEGA12) + *states = ATTR_STATE_UNSUPPORTED; } if (asic_type == CHIP_ARCTURUS) { @@ -2131,15 +2193,15 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev, int channel = to_sensor_dev_attr(attr)->index; int r, temp = 0, size = sizeof(temp); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; if (channel >= PP_TEMP_MAX) return -EINVAL; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -2164,8 +2226,8 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev, break; } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) return r; @@ -2267,12 +2329,12 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, u32 pwm_mode = 0; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - ret = pm_runtime_get_sync(adev->ddev->dev); + ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (ret < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return ret; } @@ -2280,16 +2342,16 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, pwm_mode = smu_get_fan_control_mode(&adev->smu); } else { if (!adev->powerplay.pp_funcs->get_fan_control_mode) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return -EINVAL; } pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); } - 
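Unlike the text-based files above it, the gpu_metrics attribute just registered returns a raw binary table, so the natural consumer is a small program rather than cat(1). A hedged sketch of such a reader: the card0 path is an assumption for illustration, and a real tool would check the versioned header at the start of the table before interpreting any field:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* The handler above copies at most PAGE_SIZE - 1 bytes. */
	unsigned char table[4096];
	size_t n;
	FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");

	if (!f) {
		perror("gpu_metrics");
		return EXIT_FAILURE;
	}
	n = fread(table, 1, sizeof(table), f);
	fclose(f);

	printf("gpu_metrics: read %zu bytes\n", n);
	return 0;
}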
pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return sprintf(buf, "%i\n", pwm_mode); } @@ -2303,16 +2365,16 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, int err, ret; int value; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; err = kstrtoint(buf, 10, &value); if (err) return err; - ret = pm_runtime_get_sync(adev->ddev->dev); + ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (ret < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return ret; } @@ -2320,16 +2382,16 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, smu_set_fan_control_mode(&adev->smu, value); } else { if (!adev->powerplay.pp_funcs->set_fan_control_mode) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return -EINVAL; } amdgpu_dpm_set_fan_control_mode(adev, value); } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return count; } @@ -2357,12 +2419,12 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, u32 value; u32 pwm_mode; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - err = pm_runtime_get_sync(adev->ddev->dev); + err = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (err < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return err; } @@ -2373,15 +2435,15 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, if (pwm_mode != AMD_FAN_CTRL_MANUAL) { pr_info("manual fan speed control should be enabled first\n"); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return -EINVAL; } err = kstrtou32(buf, 10, &value); if (err) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return err; } @@ -2394,8 +2456,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, else err = -EINVAL; - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) return err; @@ -2411,12 +2473,12 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, int err; u32 speed = 0; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - err = pm_runtime_get_sync(adev->ddev->dev); + err = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (err < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return err; } @@ -2427,8 +2489,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, else err = -EINVAL; - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) return err; @@ -2446,12 +2508,12 @@ static ssize_t 
amdgpu_hwmon_get_fan1_input(struct device *dev, int err; u32 speed = 0; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - err = pm_runtime_get_sync(adev->ddev->dev); + err = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (err < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return err; } @@ -2462,8 +2524,8 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, else err = -EINVAL; - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) return err; @@ -2480,20 +2542,20 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, u32 size = sizeof(min_rpm); int r; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, (void *)&min_rpm, &size); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) return r; @@ -2510,20 +2572,20 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, u32 size = sizeof(max_rpm); int r; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, (void *)&max_rpm, &size); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) return r; @@ -2539,12 +2601,12 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, int err; u32 rpm = 0; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - err = pm_runtime_get_sync(adev->ddev->dev); + err = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (err < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return err; } @@ -2555,8 +2617,8 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, else err = -EINVAL; - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) return err; @@ -2573,12 +2635,12 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, u32 value; u32 pwm_mode; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - err = pm_runtime_get_sync(adev->ddev->dev); + err = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (err < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return err; } @@ -2588,15 +2650,15 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); if (pwm_mode != AMD_FAN_CTRL_MANUAL) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + 
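Every hwmon handler in these hunks keeps the same runtime-PM bracket while swapping adev->ddev for adev_to_drm(adev). The bracket has one subtlety worth spelling out: pm_runtime_get_sync() raises the device's usage count even when it returns an error, so the failure path must still call pm_runtime_put_autosuspend(). Reduced to a skeleton, with the sensor read standing in for each handler's real body:

static ssize_t amdgpu_hwmon_show_skeleton(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		/* get_sync took a reference even on failure: drop it */
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}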
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return -ENODATA; } err = kstrtou32(buf, 10, &value); if (err) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return err; } @@ -2607,8 +2669,8 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, else err = -EINVAL; - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) return err; @@ -2624,12 +2686,12 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, u32 pwm_mode = 0; int ret; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - ret = pm_runtime_get_sync(adev->ddev->dev); + ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (ret < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return ret; } @@ -2637,16 +2699,16 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, pwm_mode = smu_get_fan_control_mode(&adev->smu); } else { if (!adev->powerplay.pp_funcs->get_fan_control_mode) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return -EINVAL; } pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 
0 : 1); } @@ -2661,7 +2723,7 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, int value; u32 pwm_mode; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; err = kstrtoint(buf, 10, &value); @@ -2675,9 +2737,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, else return -EINVAL; - err = pm_runtime_get_sync(adev->ddev->dev); + err = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (err < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return err; } @@ -2685,15 +2747,15 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, smu_set_fan_control_mode(&adev->smu, pwm_mode); } else { if (!adev->powerplay.pp_funcs->set_fan_control_mode) { - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return -EINVAL; } amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return count; } @@ -2706,12 +2768,12 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, u32 vddgfx; int r, size = sizeof(vddgfx); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -2719,8 +2781,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&vddgfx, &size); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) return r; @@ -2743,16 +2805,16 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, u32 vddnb; int r, size = sizeof(vddnb); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; /* only APUs have vddnb */ if (!(adev->flags & AMD_IS_APU)) return -EINVAL; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -2760,8 +2822,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&vddnb, &size); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) return r; @@ -2785,12 +2847,12 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, int r, size = sizeof(u32); unsigned uw; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -2798,8 +2860,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size); - pm_runtime_mark_last_busy(adev->ddev->dev); - 
pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) return r; @@ -2826,12 +2888,12 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, ssize_t size; int r; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -2845,8 +2907,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, size = snprintf(buf, PAGE_SIZE, "\n"); } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return size; } @@ -2860,12 +2922,12 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, ssize_t size; int r; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -2879,8 +2941,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, size = snprintf(buf, PAGE_SIZE, "\n"); } - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return size; } @@ -2895,7 +2957,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, int err; u32 value; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; if (amdgpu_sriov_vf(adev)) @@ -2908,9 +2970,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, value = value / 1000000; /* convert to Watt */ - err = pm_runtime_get_sync(adev->ddev->dev); + err = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (err < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return err; } @@ -2921,8 +2983,8 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, else err = -EINVAL; - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (err) return err; @@ -2938,12 +3000,12 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, uint32_t sclk; int r, size = sizeof(sclk); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - r = pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -2951,8 +3013,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&sclk, &size); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) return r; @@ -2975,12 +3037,12 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, uint32_t mclk; int r, size = sizeof(mclk); - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; - r = 
pm_runtime_get_sync(adev->ddev->dev); + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } @@ -2988,8 +3050,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&mclk, &size); - pm_runtime_mark_last_busy(adev->ddev->dev); - pm_runtime_put_autosuspend(adev->ddev->dev); + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) return r; @@ -3249,14 +3311,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, } if (((adev->flags & AMD_IS_APU) || - adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ - adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ - (attr == &sensor_dev_attr_power1_average.dev_attr.attr || - attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || + adev->family == AMDGPU_FAMILY_SI) && /* not implemented yet */ + (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) return 0; + if (((adev->family == AMDGPU_FAMILY_SI) || + ((adev->flags & AMD_IS_APU) && + (adev->asic_type < CHIP_RENOIR))) && /* not implemented yet */ + (attr == &sensor_dev_attr_power1_average.dev_attr.attr)) + return 0; + if (!is_support_sw_smu(adev)) { /* hide max/min values if we can't both query and manage the fan */ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && @@ -3321,338 +3387,6 @@ static const struct attribute_group *hwmon_groups[] = { NULL }; -void amdgpu_dpm_thermal_work_handler(struct work_struct *work) -{ - struct amdgpu_device *adev = - container_of(work, struct amdgpu_device, - pm.dpm.thermal.work); - /* switch to the thermal state */ - enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; - int temp, size = sizeof(temp); - - if (!adev->pm.dpm_enabled) - return; - - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, - (void *)&temp, &size)) { - if (temp < adev->pm.dpm.thermal.min_temp) - /* switch back the user state */ - dpm_state = adev->pm.dpm.user_state; - } else { - if (adev->pm.dpm.thermal.high_to_low) - /* switch back the user state */ - dpm_state = adev->pm.dpm.user_state; - } - mutex_lock(&adev->pm.mutex); - if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) - adev->pm.dpm.thermal_active = true; - else - adev->pm.dpm.thermal_active = false; - adev->pm.dpm.state = dpm_state; - mutex_unlock(&adev->pm.mutex); - - amdgpu_pm_compute_clocks(adev); -} - -static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, - enum amd_pm_state_type dpm_state) -{ - int i; - struct amdgpu_ps *ps; - u32 ui_class; - bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? 
- true : false; - - /* check if the vblank period is too short to adjust the mclk */ - if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { - if (amdgpu_dpm_vblank_too_short(adev)) - single_display = false; - } - - /* certain older asics have a separare 3D performance state, - * so try that first if the user selected performance - */ - if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) - dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; - /* balanced states don't exist at the moment */ - if (dpm_state == POWER_STATE_TYPE_BALANCED) - dpm_state = POWER_STATE_TYPE_PERFORMANCE; - -restart_search: - /* Pick the best power state based on current conditions */ - for (i = 0; i < adev->pm.dpm.num_ps; i++) { - ps = &adev->pm.dpm.ps[i]; - ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; - switch (dpm_state) { - /* user states */ - case POWER_STATE_TYPE_BATTERY: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; - case POWER_STATE_TYPE_BALANCED: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; - case POWER_STATE_TYPE_PERFORMANCE: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; - /* internal states */ - case POWER_STATE_TYPE_INTERNAL_UVD: - if (adev->pm.dpm.uvd_ps) - return adev->pm.dpm.uvd_ps; - else - break; - case POWER_STATE_TYPE_INTERNAL_UVD_SD: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_UVD_HD: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_UVD_HD2: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_UVD_MVC: - if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_BOOT: - return adev->pm.dpm.boot_ps; - case POWER_STATE_TYPE_INTERNAL_THERMAL: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_ACPI: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_ULV: - if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_3DPERF: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - return ps; - break; - default: - break; - } - } - /* use a fallback state if we didn't match */ - switch (dpm_state) { - case POWER_STATE_TYPE_INTERNAL_UVD_SD: - dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; - goto restart_search; - case POWER_STATE_TYPE_INTERNAL_UVD_HD: - case POWER_STATE_TYPE_INTERNAL_UVD_HD2: - case POWER_STATE_TYPE_INTERNAL_UVD_MVC: - if (adev->pm.dpm.uvd_ps) { - return adev->pm.dpm.uvd_ps; - } else { - dpm_state = POWER_STATE_TYPE_PERFORMANCE; - goto restart_search; - } - case POWER_STATE_TYPE_INTERNAL_THERMAL: - dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; - goto restart_search; - case POWER_STATE_TYPE_INTERNAL_ACPI: - dpm_state = POWER_STATE_TYPE_BATTERY; - goto restart_search; - case POWER_STATE_TYPE_BATTERY: - case POWER_STATE_TYPE_BALANCED: - case POWER_STATE_TYPE_INTERNAL_3DPERF: - dpm_state = POWER_STATE_TYPE_PERFORMANCE; - goto restart_search; - default: - break; - } - - return NULL; -} - -static void 
amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) -{ - struct amdgpu_ps *ps; - enum amd_pm_state_type dpm_state; - int ret; - bool equal = false; - - /* if dpm init failed */ - if (!adev->pm.dpm_enabled) - return; - - if (adev->pm.dpm.user_state != adev->pm.dpm.state) { - /* add other state override checks here */ - if ((!adev->pm.dpm.thermal_active) && - (!adev->pm.dpm.uvd_active)) - adev->pm.dpm.state = adev->pm.dpm.user_state; - } - dpm_state = adev->pm.dpm.state; - - ps = amdgpu_dpm_pick_power_state(adev, dpm_state); - if (ps) - adev->pm.dpm.requested_ps = ps; - else - return; - - if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { - printk("switching from power state:\n"); - amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); - printk("switching to power state:\n"); - amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); - } - - /* update whether vce is active */ - ps->vce_active = adev->pm.dpm.vce_active; - if (adev->powerplay.pp_funcs->display_configuration_changed) - amdgpu_dpm_display_configuration_changed(adev); - - ret = amdgpu_dpm_pre_set_power_state(adev); - if (ret) - return; - - if (adev->powerplay.pp_funcs->check_state_equal) { - if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) - equal = false; - } - - if (equal) - return; - - amdgpu_dpm_set_power_state(adev); - amdgpu_dpm_post_set_power_state(adev); - - adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; - adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; - - if (adev->powerplay.pp_funcs->force_performance_level) { - if (adev->pm.dpm.thermal_active) { - enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; - /* force low perf level for thermal */ - amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); - /* save the user's level */ - adev->pm.dpm.forced_level = level; - } else { - /* otherwise, user selected level */ - amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); - } - } -} - -void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) -{ - int ret = 0; - - if (adev->family == AMDGPU_FAMILY_SI) { - mutex_lock(&adev->pm.mutex); - if (enable) { - adev->pm.dpm.uvd_active = true; - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; - } else { - adev->pm.dpm.uvd_active = false; - } - mutex_unlock(&adev->pm.mutex); - - amdgpu_pm_compute_clocks(adev); - } else { - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); - if (ret) - DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", - enable ? 
"enable" : "disable", ret); - - /* enable/disable Low Memory PState for UVD (4k videos) */ - if (adev->asic_type == CHIP_STONEY && - adev->uvd.decode_image_width >= WIDTH_4K) { - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - - if (hwmgr && hwmgr->hwmgr_func && - hwmgr->hwmgr_func->update_nbdpm_pstate) - hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, - !enable, - true); - } - } -} - -void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) -{ - int ret = 0; - - if (adev->family == AMDGPU_FAMILY_SI) { - mutex_lock(&adev->pm.mutex); - if (enable) { - adev->pm.dpm.vce_active = true; - /* XXX select vce level based on ring/task */ - adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; - } else { - adev->pm.dpm.vce_active = false; - } - mutex_unlock(&adev->pm.mutex); - - amdgpu_pm_compute_clocks(adev); - } else { - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); - if (ret) - DRM_ERROR("Dpm %s vce failed, ret = %d. \n", - enable ? "enable" : "disable", ret); - } -} - -void amdgpu_pm_print_power_states(struct amdgpu_device *adev) -{ - int i; - - if (adev->powerplay.pp_funcs->print_power_state == NULL) - return; - - for (i = 0; i < adev->pm.dpm.num_ps; i++) - amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); - -} - -void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) -{ - int ret = 0; - - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); - if (ret) - DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", - enable ? "enable" : "disable", ret); -} - -int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) -{ - int r; - - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { - r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle); - if (r) { - pr_err("smu firmware loading failed\n"); - return r; - } - *smu_version = adev->pm.fw_version; - } - return 0; -} - int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) { int ret; @@ -3713,55 +3447,6 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list); } -void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) -{ - int i = 0; - - if (!adev->pm.dpm_enabled) - return; - - if (adev->mode_info.num_crtc) - amdgpu_display_bandwidth_update(adev); - - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - if (ring && ring->sched.ready) - amdgpu_fence_wait_empty(ring); - } - - if (is_support_sw_smu(adev)) { - struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm; - smu_handle_task(&adev->smu, - smu_dpm->dpm_level, - AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, - true); - } else { - if (adev->powerplay.pp_funcs->dispatch_tasks) { - if (!amdgpu_device_has_dc_support(adev)) { - mutex_lock(&adev->pm.mutex); - amdgpu_dpm_get_active_displays(adev); - adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; - adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); - adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); - /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. 
*/ - if (adev->pm.pm_display_cfg.vrefresh > 120) - adev->pm.pm_display_cfg.min_vblank_time = 0; - if (adev->powerplay.pp_funcs->display_configuration_change) - adev->powerplay.pp_funcs->display_configuration_change( - adev->powerplay.pp_handle, - &adev->pm.pm_display_cfg); - mutex_unlock(&adev->pm.mutex); - } - amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); - } else { - mutex_lock(&adev->pm.mutex); - amdgpu_dpm_get_active_displays(adev); - amdgpu_dpm_change_power_state_locked(adev); - mutex_unlock(&adev->pm.mutex); - } - } -} - /* * Debugfs info */ @@ -3869,11 +3554,11 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_device *adev = drm_to_adev(dev); u32 flags = 0; int r; - if (adev->in_gpu_reset) + if (amdgpu_in_reset(adev)) return -EPERM; r = pm_runtime_get_sync(dev->dev); @@ -3882,11 +3567,6 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) return r; } - amdgpu_device_ip_get_clockgating_state(adev, &flags); - seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); - amdgpu_parse_cg_state(m, flags); - seq_printf(m, "\n"); - if (!adev->pm.dpm_enabled) { seq_printf(m, "dpm not enabled\n"); pm_runtime_mark_last_busy(dev->dev); @@ -3906,7 +3586,16 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) } else { r = amdgpu_debugfs_pm_info_pp(m, adev); } + if (r) + goto out; + + amdgpu_device_ip_get_clockgating_state(adev, &flags); + + seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); + amdgpu_parse_cg_state(m, flags); + seq_printf(m, "\n"); +out: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/pm/inc/amd_powerplay.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h rename to drivers/gpu/drm/amd/pm/inc/amd_powerplay.h diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h similarity index 94% rename from drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h rename to drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index aa27fe65cdfa94d74c5c23a2bba4de5462f8f9f2..dff4a5f99bb0374cf65325962a8dc586a3f5c60f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -341,10 +341,6 @@ enum amdgpu_pcie_gen { ((adev)->powerplay.pp_funcs->reset_power_profile_state(\ (adev)->powerplay.pp_handle, request)) -#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \ - ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\ - (adev)->powerplay.pp_handle, msg_id)) - #define amdgpu_dpm_get_power_profile_mode(adev, buf) \ ((adev)->powerplay.pp_funcs->get_power_profile_mode(\ (adev)->powerplay.pp_handle, buf)) @@ -357,10 +353,6 @@ enum amdgpu_pcie_gen { ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\ (adev)->powerplay.pp_handle, type, parameter, size)) -#define amdgpu_dpm_enable_mgpu_fan_boost(adev) \ - ((adev)->powerplay.pp_funcs->enable_mgpu_fan_boost(\ - (adev)->powerplay.pp_handle)) - #define amdgpu_dpm_get_ppfeature_status(adev, buf) \ ((adev)->powerplay.pp_funcs->get_ppfeature_status(\ (adev)->powerplay.pp_handle, (buf))) @@ -369,6 +361,9 @@ enum amdgpu_pcie_gen { ((adev)->powerplay.pp_funcs->set_ppfeature_status(\ (adev)->powerplay.pp_handle, (ppfeatures))) +#define amdgpu_dpm_get_gpu_metrics(adev, table) \ + 
((adev)->powerplay.pp_funcs->get_gpu_metrics((adev)->powerplay.pp_handle, table)) + struct amdgpu_dpm { struct amdgpu_ps *ps; /* number of valid power states */ @@ -545,4 +540,26 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en); +int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev); + +int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, + uint32_t msg_id); + +int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, + bool acquire); + +void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); + +int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, + void *data, uint32_t *size); + +void amdgpu_dpm_thermal_work_handler(struct work_struct *work); + +void amdgpu_pm_compute_clocks(struct amdgpu_device *adev); +void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); +void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); +void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable); +void amdgpu_pm_print_power_states(struct amdgpu_device *adev); +int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h similarity index 84% rename from drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h rename to drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h index d9ae2b49a402fef3b11e425d57d41fe74ab48f7a..45a22e101d1593e065aea7a56c230e7b4170e448 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h @@ -79,18 +79,10 @@ struct amdgpu_device_attr_entry { amdgpu_get_##_name, NULL, \ _flags, ##__VA_ARGS__) -void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); int amdgpu_pm_sysfs_init(struct amdgpu_device *adev); int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev); void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev); void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev); -void amdgpu_pm_print_power_states(struct amdgpu_device *adev); -int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version); -void amdgpu_pm_compute_clocks(struct amdgpu_device *adev); -void amdgpu_dpm_thermal_work_handler(struct work_struct *work); -void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); -void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); -void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable); int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h similarity index 97% rename from drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h rename to drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index 074458eb54076733fdf344d24fd50d21304a4c2b..d22a759b6b435d0acb2a18ab7265e7b42e4b25db 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -274,6 +274,9 @@ struct smu_table_context void *overdrive_table; void *boot_overdrive_table; + + uint32_t gpu_metrics_table_size; + void *gpu_metrics_table; }; struct smu_dpm_context { @@ -448,6 +451,10 @@ struct smu_context bool dc_controlled_by_gpio; struct work_struct throttling_logging_work; + atomic64_t throttle_int_counter; + + unsigned fan_max_rpm; + unsigned manual_fan_speed_rpm; }; struct i2c_adapter; @@ -491,7 +498,6 @@ struct pptable_funcs { int (*notify_smc_display_config)(struct smu_context *smu); int (*set_cpu_power_state)(struct 
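The amdgpu_dpm.h hunk above deletes the amdgpu_dpm_set_clockgating_by_smu() and amdgpu_dpm_enable_mgpu_fan_boost() macros and re-declares them as functions. The likely motivation is that a function body can validate the callback and choose between the swSMU and powerplay paths, which a one-line macro cannot. A plausible shape for the fan-boost variant, assumed for illustration rather than copied from this series:

/* Hypothetical function form of the former macro; the NULL checks and
 * the swSMU branch are what the macro could not express. */
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return smu_enable_mgpu_fan_boost(&adev->smu);

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
		return pp_funcs->enable_mgpu_fan_boost(adev->powerplay.pp_handle);

	return -EOPNOTSUPP;
}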
smu_context *smu); bool (*is_dpm_running)(struct smu_context *smu); - int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed); int (*set_watermarks_table)(struct smu_context *smu, struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); @@ -567,7 +573,6 @@ struct pptable_funcs { int (*conv_power_profile_to_pplib_workload)(int power_profile); uint32_t (*get_fan_control_mode)(struct smu_context *smu); int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); - int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed); int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate); int (*gfx_off_control)(struct smu_context *smu, bool enable); @@ -590,6 +595,11 @@ struct pptable_funcs { void (*log_thermal_throttling_event)(struct smu_context *smu); size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf); int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask); + ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table); + int (*enable_mgpu_fan_boost)(struct smu_context *smu); + int (*gfx_ulv_control)(struct smu_context *smu, bool enablement); + int (*deep_sleep_control)(struct smu_context *smu, bool enablement); + int (*get_fan_parameters)(struct smu_context *smu); }; typedef enum { @@ -792,5 +802,9 @@ int smu_get_dpm_clock_table(struct smu_context *smu, int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value); +ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu, void **table); + +int smu_enable_mgpu_fan_boost(struct smu_context *smu); + #endif #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h rename to drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/cz_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/cz_ppsmc.h rename to drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h rename to drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/inc/hardwaremanager.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h rename to drivers/gpu/drm/amd/pm/inc/hardwaremanager.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/inc/hwmgr.h similarity index 99% rename from drivers/gpu/drm/amd/powerplay/inc/hwmgr.h rename to drivers/gpu/drm/amd/pm/inc/hwmgr.h index 15ed6cbdf36604ac893f0d72a7579915bed27974..1b3529efc91e6a980410c82ca8719ec5d606a8f0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/inc/hwmgr.h @@ -359,6 +359,7 @@ struct pp_hwmgr_func { int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate); int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr, bool disable); + ssize_t (*get_gpu_metrics)(struct pp_hwmgr *hwmgr, void **table); }; struct pp_table_func { diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h similarity index 100% rename from 
drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h rename to drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/pm/inc/power_state.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/power_state.h rename to drivers/gpu/drm/amd/pm/inc/power_state.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/pm/inc/pp_debug.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/pp_debug.h rename to drivers/gpu/drm/amd/pm/inc/pp_debug.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_endian.h b/drivers/gpu/drm/amd/pm/inc/pp_endian.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/pp_endian.h rename to drivers/gpu/drm/amd/pm/inc/pp_endian.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h b/drivers/gpu/drm/amd/pm/inc/pp_thermal.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h rename to drivers/gpu/drm/amd/pm/inc/pp_thermal.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h b/drivers/gpu/drm/amd/pm/inc/ppinterrupt.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h rename to drivers/gpu/drm/amd/pm/inc/ppinterrupt.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h rename to drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu10.h b/drivers/gpu/drm/amd/pm/inc/smu10.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu10.h rename to drivers/gpu/drm/amd/pm/inc/smu10.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu10_driver_if.h b/drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu10_driver_if.h rename to drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h rename to drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h rename to drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h similarity index 97% rename from drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h rename to drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h index 4b2da98afcd27414642b89ee1a8499d8a4c4ef1e..246d3951a78ab471bc7cc9f7bdc293a020755337 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h +++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h @@ -884,6 +884,45 @@ typedef struct { uint32_t MmHubPadding[8]; // SMU internal use } SmuMetrics_t; +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequency; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequency ; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t 
TemperatureMem ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrMem0 ; + uint16_t TemperatureVrMem1 ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureLiquid0 ; + uint16_t TemperatureLiquid1 ; + uint16_t TemperaturePlx ; + uint16_t Padding16 ; + uint32_t ThrottlerStatus ; + + uint8_t LinkDpmLevel; + uint8_t Padding8_2; + uint16_t CurrFanSpeed; + + uint32_t EnergyAccumulator; + uint16_t AverageVclkFrequency ; + uint16_t AverageDclkFrequency ; + uint16_t VcnActivityPercentage ; + uint16_t padding16_2; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetrics_NV12_t; + typedef struct { uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz) uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h similarity index 99% rename from drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h rename to drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h index aa2708fccb6deeb97d4d9fea6c4ab49f12b0533d..5ef9c92f57c47ffcd1a3f4ed06387b06e8ae0a9e 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h +++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define SMU11_DRIVER_IF_VERSION 0x34 +#define SMU11_DRIVER_IF_VERSION 0x35 #define PPTABLE_Sienna_Cichlid_SMU_VERSION 5 @@ -127,7 +127,7 @@ #define FEATURE_DF_CSTATE_BIT 45 #define FEATURE_2_STEP_PSTATE_BIT 46 #define FEATURE_SMNCLK_DPM_BIT 47 -#define FEATURE_SPARE_48_BIT 48 +#define FEATURE_PERLINK_GMIDOWN_BIT 48 #define FEATURE_GFX_EDC_BIT 49 #define FEATURE_SPARE_50_BIT 50 #define FEATURE_SPARE_51_BIT 51 @@ -169,7 +169,7 @@ typedef enum { #define DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN 0x00000200 #define DPM_OVERRIDE_DISABLE_MEMORY_TEMPERATURE_READ 0x00000400 #define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCEFCLK 0x00000800 -#define DPM_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00001000 +#define DPM_OVERRIDE_ENABLE_FAST_FCLK_TIMER 0x00001000 #define DPM_OVERRIDE_DISABLE_VCN_PG 0x00002000 #define DPM_OVERRIDE_DISABLE_FMAX_VMAX 0x00004000 diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h rename to drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7.h b/drivers/gpu/drm/amd/pm/inc/smu7.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu7.h rename to drivers/gpu/drm/amd/pm/inc/smu7.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/pm/inc/smu71.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu71.h rename to drivers/gpu/drm/amd/pm/inc/smu71.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h b/drivers/gpu/drm/amd/pm/inc/smu71_discrete.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h rename to drivers/gpu/drm/amd/pm/inc/smu71_discrete.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72.h b/drivers/gpu/drm/amd/pm/inc/smu72.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu72.h rename to drivers/gpu/drm/amd/pm/inc/smu72.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h b/drivers/gpu/drm/amd/pm/inc/smu72_discrete.h similarity index 100% 
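The SmuMetrics_NV12_t addition and the FEATURE_SPARE_48_BIT -> FEATURE_PERLINK_GMIDOWN_BIT rename are exactly the wire-format changes the header's "always increment the interface version" rule exists for, hence the 0x34 -> 0x35 bump (mirrored by SMU11_DRIVER_IF_VERSION_Sienna_Cichlid later in this patch). Conceptually the driver-side handshake looks like the sketch below; the real check lives elsewhere in the SMU code and may only warn on mismatch rather than fail:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

#define SMU11_DRIVER_IF_VERSION	0x35	/* the value bumped above */

/* fw_if_version is whatever the running SMU firmware reports; how it is
 * queried is hardware-specific and out of scope for this sketch. */
static int check_driver_if_version(uint32_t fw_if_version)
{
	if (fw_if_version != SMU11_DRIVER_IF_VERSION) {
		pr_warn("SMU driver-if mismatch: fw 0x%x, driver 0x%x\n",
			fw_if_version, SMU11_DRIVER_IF_VERSION);
		return -EINVAL;
	}
	return 0;
}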
rename from drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h rename to drivers/gpu/drm/amd/pm/inc/smu72_discrete.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu73.h b/drivers/gpu/drm/amd/pm/inc/smu73.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu73.h rename to drivers/gpu/drm/amd/pm/inc/smu73.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu73_discrete.h b/drivers/gpu/drm/amd/pm/inc/smu73_discrete.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu73_discrete.h rename to drivers/gpu/drm/amd/pm/inc/smu73_discrete.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74.h b/drivers/gpu/drm/amd/pm/inc/smu74.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu74.h rename to drivers/gpu/drm/amd/pm/inc/smu74.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h b/drivers/gpu/drm/amd/pm/inc/smu74_discrete.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h rename to drivers/gpu/drm/amd/pm/inc/smu74_discrete.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75.h b/drivers/gpu/drm/amd/pm/inc/smu75.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu75.h rename to drivers/gpu/drm/amd/pm/inc/smu75.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h b/drivers/gpu/drm/amd/pm/inc/smu75_discrete.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h rename to drivers/gpu/drm/amd/pm/inc/smu75_discrete.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h b/drivers/gpu/drm/amd/pm/inc/smu7_common.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu7_common.h rename to drivers/gpu/drm/amd/pm/inc/smu7_common.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h b/drivers/gpu/drm/amd/pm/inc/smu7_discrete.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h rename to drivers/gpu/drm/amd/pm/inc/smu7_discrete.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_fusion.h b/drivers/gpu/drm/amd/pm/inc/smu7_fusion.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu7_fusion.h rename to drivers/gpu/drm/amd/pm/inc/smu7_fusion.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h rename to drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu8.h b/drivers/gpu/drm/amd/pm/inc/smu8.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu8.h rename to drivers/gpu/drm/amd/pm/inc/smu8.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu8_fusion.h b/drivers/gpu/drm/amd/pm/inc/smu8_fusion.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu8_fusion.h rename to drivers/gpu/drm/amd/pm/inc/smu8_fusion.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9.h b/drivers/gpu/drm/amd/pm/inc/smu9.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu9.h rename to drivers/gpu/drm/amd/pm/inc/smu9.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h rename to drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu_types.h rename to 
drivers/gpu/drm/amd/pm/inc/smu_types.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h b/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h rename to drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h rename to drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h similarity index 91% rename from drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h rename to drivers/gpu/drm/amd/pm/inc/smu_v11_0.h index 6a42331aba8aae89a6d3b2e9507bd695454f1f29..1f9575a4dfe7c9e1d8951ba824f141b30f6d4a8b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h @@ -28,10 +28,10 @@ #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU11_DRIVER_IF_VERSION_ARCT 0x17 #define SMU11_DRIVER_IF_VERSION_NV10 0x36 -#define SMU11_DRIVER_IF_VERSION_NV12 0x33 +#define SMU11_DRIVER_IF_VERSION_NV12 0x36 #define SMU11_DRIVER_IF_VERSION_NV14 0x36 -#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x34 -#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x3 +#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x35 +#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x4 /* MP Apertures */ #define MP0_Public 0x03800000 @@ -200,12 +200,12 @@ int smu_v11_0_set_fan_control_mode(struct smu_context *smu, uint32_t mode); -int -smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed); - int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed); +int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed); + int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, uint32_t pstate); @@ -264,5 +264,21 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu, uint32_t *min_value, uint32_t *max_value); +int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu); + +int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu); + +int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu); + +int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu); + +void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics); + +int smu_v11_0_gfx_ulv_control(struct smu_context *smu, + bool enablement); + +int smu_v11_0_deep_sleep_control(struct smu_context *smu, + bool enablement); + #endif #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_7_ppsmc.h rename to drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_7_pptable.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_7_pptable.h rename to drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h similarity index 98% rename from drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h rename to drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h index 406bfd187ce864f08de97c1ddb34fab2e0852f2d..fa0174dc7e0eac5c060e8686d0ec818ad0519c46 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h +++ 
b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h @@ -123,7 +123,9 @@ #define PPSMC_MSG_DALDisableDummyPstateChange 0x49 #define PPSMC_MSG_DALEnableDummyPstateChange 0x4A -#define PPSMC_Message_Count 0x4B +#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x4C + +#define PPSMC_Message_Count 0x4D typedef uint32_t PPSMC_Result; typedef uint32_t PPSMC_Msg; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h rename to drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h similarity index 96% rename from drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h rename to drivers/gpu/drm/amd/pm/inc/smu_v12_0.h index 02de3b6199e5332afe1b7308f5dce9e12c27ed5e..fa2e8cb079670aa800d024c934d5767f23f6ad25 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h @@ -60,5 +60,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ int smu_v12_0_set_driver_table_location(struct smu_context *smu); +void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics); + #endif #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h rename to drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/pm/inc/smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/smumgr.h rename to drivers/gpu/drm/amd/pm/inc/smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h rename to drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h rename to drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h rename to drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/vega12_ppsmc.h rename to drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h rename to drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/pm/powerplay/Makefile similarity index 70% rename from drivers/gpu/drm/amd/powerplay/Makefile rename to drivers/gpu/drm/amd/pm/powerplay/Makefile index e9c48f99f71b5ebcd7b8efd6aaffcb6f64eba579..0fb114adc79f61398338f5d999804a0bd767683f 100644 --- a/drivers/gpu/drm/amd/powerplay/Makefile +++ b/drivers/gpu/drm/amd/pm/powerplay/Makefile @@ -1,5 +1,5 @@ # -# Copyright 2017 Advanced Micro Devices, Inc. +# Copyright 2020 Advanced Micro Devices, Inc. 
# # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), @@ -20,25 +20,20 @@ # OTHER DEALINGS IN THE SOFTWARE. # -subdir-ccflags-y += \ - -I$(FULL_AMD_PATH)/powerplay/inc/ \ - -I$(FULL_AMD_PATH)/include/asic_reg \ - -I$(FULL_AMD_PATH)/include \ - -I$(FULL_AMD_PATH)/powerplay/smumgr\ - -I$(FULL_AMD_PATH)/powerplay/hwmgr - -AMD_PP_PATH = ../powerplay +AMD_PP_PATH = ../pm/powerplay PP_LIBS = smumgr hwmgr -AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS))) +AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/powerplay/,$(PP_LIBS))) include $(AMD_POWERPLAY) -POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o \ - smu_v12_0.o arcturus_ppt.o navi10_ppt.o \ - renoir_ppt.o sienna_cichlid_ppt.o smu_cmn.o +POWER_MGR-y = amd_powerplay.o + +POWER_MGR-$(CONFIG_DRM_AMDGPU_CIK)+= kv_dpm.o kv_smc.o + +POWER_MGR-$(CONFIG_DRM_AMDGPU_SI)+= si_dpm.o si_smc.o -AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR)) +AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR-y)) AMD_POWERPLAY_FILES += $(AMD_PP_POWER) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c similarity index 98% rename from drivers/gpu/drm/amd/powerplay/amd_powerplay.c rename to drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 7e6dcdf7df73a83d37da79af18fe516205d84a57..a6321f2063c1d8ca2434fc46d3e43f844fc20a4c 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1598,6 +1598,24 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate) return 0; } +static ssize_t pp_get_gpu_metrics(void *handle, void **table) +{ + struct pp_hwmgr *hwmgr = handle; + ssize_t size; + + if (!hwmgr) + return -EINVAL; + + if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics) + return -EOPNOTSUPP; + + mutex_lock(&hwmgr->smu_lock); + size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table); + mutex_unlock(&hwmgr->smu_lock); + + return size; +} + static const struct amd_pm_funcs pp_dpm_funcs = { .load_firmware = pp_dpm_load_fw, .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, @@ -1658,4 +1676,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .smu_i2c_bus_access = pp_smu_i2c_bus_access, .set_df_cstate = pp_set_df_cstate, .set_xgmi_pstate = pp_set_xgmi_pstate, + .get_gpu_metrics = pp_get_gpu_metrics, }; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h similarity index 100% rename from drivers/gpu/drm/amd/amdgpu/cik_dpm.h rename to drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/Makefile similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/Makefile rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/Makefile diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c 
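pp_get_gpu_metrics() above is the standard shape for an optional backend callback: -EINVAL when there is no context at all, -EOPNOTSUPP when power management is disabled or the hook is absent, and a mutex around the actual SMU traffic. The same skeleton reduced to its parts — struct dev and dev_ops are hypothetical stand-ins for pp_hwmgr and its function table:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct dev;
struct dev_ops {
	ssize_t (*get_table)(struct dev *d, void **table);
};
struct dev {
	bool enabled;
	const struct dev_ops *ops;
	struct mutex lock;
};

static ssize_t dispatch_get_table(struct dev *d, void **table)
{
	ssize_t size;

	if (!d)
		return -EINVAL;		/* no context at all */
	if (!d->enabled || !d->ops->get_table)
		return -EOPNOTSUPP;	/* feature not wired up */

	mutex_lock(&d->lock);		/* serialize SMU access */
	size = d->ops->get_table(d, table);
	mutex_unlock(&d->lock);

	return size;
}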
similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c similarity index 99% rename from drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c index 9454ab50f9a12d9bd937ad3907af081b6c481ac7..1f9b9facdf1f4345f2e10a2e301cf89af6b537ae 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c @@ -271,7 +271,10 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr) bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) { - PHM_FUNC_CHECK(hwmgr); + if (hwmgr == NULL || + hwmgr->hwmgr_func == NULL) + return false; + if (hwmgr->pp_one_vf) return false; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr_ppt.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr_ppt.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c diff 
--git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h similarity index 98% rename from drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h index 3ee54f18294326afc810e133ebaa1c7d7cd29311..76ed2e413594b2a0d37a68c713d8d28db16c3897 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h @@ -26,15 +26,6 @@ #include "hwmgr.h" -#define MEM_TYPE_GDDR5 0x50 -#define MEM_TYPE_GDDR4 0x40 -#define MEM_TYPE_GDDR3 0x30 -#define MEM_TYPE_DDR2 0x20 -#define MEM_TYPE_GDDR1 0x10 -#define MEM_TYPE_DDR3 0xb0 -#define MEM_TYPE_MASK 0xF0 - - /* As returned from PowerConnectorDetectionTable. */ #define PP_ATOM_POWER_BUDGET_DISABLE_OVERDRIVE 0x80 #define PP_ATOM_POWER_BUDGET_SHOW_WARNING 0x40 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pppcielanes.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/pppcielanes.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pppcielanes.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/pppcielanes.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h rename to 
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c similarity index 99% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index c9cfe90a29471c6590b34b228711ae98c5e06658..9ee8cf8267c88e80896462b8d5dbb04dcb7de27b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -204,8 +204,7 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->need_min_deep_sleep_dcefclk && - smu10_data->deep_sleep_dcefclk != clock) { + if (clock && smu10_data->deep_sleep_dcefclk != clock) { smu10_data->deep_sleep_dcefclk = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, @@ -219,8 +218,7 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->dcf_actual_hard_min_freq && - smu10_data->dcf_actual_hard_min_freq != clock) { + if (clock && smu10_data->dcf_actual_hard_min_freq != clock) { smu10_data->dcf_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinDcefclkByFreq, @@ -234,8 +232,7 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->f_actual_hard_min_freq && - smu10_data->f_actual_hard_min_freq != clock) { + if (clock && smu10_data->f_actual_hard_min_freq != clock) { smu10_data->f_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_inc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_inc.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h diff --git 
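The three smu10 hunks above all make the same change: gate the SMC message on the requested clock being nonzero and different from the cached value, rather than on the cache being nonzero — which also lets the very first nonzero request through. The pattern in isolation; set_hw_min() stands in for smum_send_msg_to_smc_with_parameter():

#include <linux/types.h>

static void request_min_freq(uint32_t *cached, uint32_t clock,
			     void (*set_hw_min)(uint32_t))
{
	if (clock && *cached != clock) {
		*cached = clock;	/* remember the last value sent */
		set_hw_min(clock);	/* message the SMC only on change */
	}
}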
a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_dyn_defaults.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_dyn_defaults.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c similarity index 99% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index ffe05b7cc1f0ae0a566c389443a94b20b7178cae..4a3b64aa21ceb12dc63877954f9994845bbdbba1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -3581,7 +3581,8 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, case AMDGPU_PP_SENSOR_GPU_POWER: return smu7_get_gpu_power(hwmgr, (uint32_t *)value); case AMDGPU_PP_SENSOR_VDDGFX: - if ((data->vr_config & 0xff) == 0x2) + if ((data->vr_config & VRCONF_VDDGFX_MASK) == + (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT)) val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID); else diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.h similarity index 100% rename from 
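The smu7_read_sensor() fix above replaces a magic-number test, (vr_config & 0xff) == 0x2, with the named VRCONF_VDDGFX mask and a shifted VR_SVI2_PLANE_2 — which matters once the VDDGFX field does not live in the low byte. In illustrative form; the layout below is made up for the sketch, the patch uses the real register definitions:

#include <linux/types.h>

#define VDD_FIELD_MASK	0x0000ff00U	/* assumed field position */
#define VDD_FIELD_SHIFT	8
#define PLANE_2		0x2U

static bool vdd_on_plane2(uint32_t vr_config)
{
	return (vr_config & VDD_FIELD_MASK) ==
	       (PLANE_2 << VDD_FIELD_SHIFT);
}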
drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_inc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_inc.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h rename to 
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c similarity index 95% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c index 468bdd6f669733afaf2b5465256b134f64c13f99..952cd3d7240e38905a05c4e5565b8a1c3e50b27c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c @@ -363,17 +363,29 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_tdp_table *tdp_table = pp_table_info->tdp_table; struct amdgpu_device *adev = hwmgr->adev; - int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP; + int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; - if (low < range->min) - low = range->min; - if (high > range->max) - high = range->max; + /* compare them in unit celsius degree */ + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + /* + * As a common sense, usSoftwareShutdownTemp should be bigger + * than ThotspotLimit. For any invalid usSoftwareShutdownTemp, + * we will just use the max possible setting VEGA10_THERMAL_MAXIMUM_ALERT_TEMP + * to avoid false alarms. 
+ */ + if ((tdp_table->usSoftwareShutdownTemp > + range->hotspot_crit_max / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)) { + if (high > tdp_table->usSoftwareShutdownTemp) + high = tdp_table->usSoftwareShutdownTemp; + } if (low > high) return -EINVAL; @@ -382,8 +394,8 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) & (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) & (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c similarity index 95% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index a678a67f1c0d0429a3c0bd1da3385d747eb74139..f0680dd585089dcbd3fb7db17564f8c70a1e13b0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -47,6 +47,13 @@ #include "pp_thermal.h" #include "vega12_baco.h" +#define smnPCIE_LC_SPEED_CNTL 0x11140290 +#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 + +#define LINK_WIDTH_MAX 6 +#define LINK_SPEED_MAX 3 +static int link_width[] = {0, 1, 2, 4, 8, 12, 16}; +static int link_speed[] = {25, 50, 80, 160}; static int vega12_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); @@ -1255,22 +1262,29 @@ static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) return (mem_clk * 100); } -static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table) +static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr, + SmuMetrics_t *metrics_table, + bool bypass_cache) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); int ret = 0; - if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) { - ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table, - TABLE_SMU_METRICS, true); + if (bypass_cache || + !data->metrics_time || + time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) { + ret = smum_smc_table_manager(hwmgr, + (uint8_t *)(&data->metrics_table), + TABLE_SMU_METRICS, + true); if (ret) { pr_info("Failed to export SMU metrics table!\n"); 
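The vega10 thermal hunk above (vega12 gets the same treatment below) stops pre-scaling the alert limits and instead converts range->min — reported in 1/PP_TEMPERATURE_UNITS_PER_CENTIGRADES of a degree — down to whole degrees before comparing, because the DIG_THERM_INTH/INTL register fields take degrees. A minimal sketch of the low clamp, assuming that constant is 1000 (i.e. millidegrees):

#define UNITS_PER_DEGREE	1000	/* assumed value of the pp constant */

static int clamp_alert_low(int alert_min_deg, int range_min_units)
{
	int range_min_deg = range_min_units / UNITS_PER_DEGREE;

	return alert_min_deg < range_min_deg ? range_min_deg : alert_min_deg;
}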
return ret; } - memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t)); data->metrics_time = jiffies; - } else + } + + if (metrics_table) memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t)); return ret; @@ -1281,7 +1295,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query) SmuMetrics_t metrics_table; int ret = 0; - ret = vega12_get_metrics_table(hwmgr, &metrics_table); + ret = vega12_get_metrics_table(hwmgr, &metrics_table, false); if (ret) return ret; @@ -1332,7 +1346,7 @@ static int vega12_get_current_activity_percent( SmuMetrics_t metrics_table; int ret = 0; - ret = vega12_get_metrics_table(hwmgr, &metrics_table); + ret = vega12_get_metrics_table(hwmgr, &metrics_table, false); if (ret) return ret; @@ -1380,7 +1394,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 4; break; case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: - ret = vega12_get_metrics_table(hwmgr, &metrics_table); + ret = vega12_get_metrics_table(hwmgr, &metrics_table, false); if (ret) return ret; @@ -1389,7 +1403,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 4; break; case AMDGPU_PP_SENSOR_MEM_TEMP: - ret = vega12_get_metrics_table(hwmgr, &metrics_table); + ret = vega12_get_metrics_table(hwmgr, &metrics_table, false); if (ret) return ret; @@ -2095,6 +2109,46 @@ static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return 0; } +static int vega12_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & + PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) + >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; +} + +static int vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr) +{ + uint32_t width_level; + + width_level = vega12_get_current_pcie_link_width_level(hwmgr); + if (width_level > LINK_WIDTH_MAX) + width_level = 0; + + return link_width[width_level]; +} + +static int vega12_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) + >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; +} + +static int vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr) +{ + uint32_t speed_level; + + speed_level = vega12_get_current_pcie_link_speed_level(hwmgr); + if (speed_level > LINK_SPEED_MAX) + speed_level = 0; + + return link_speed[speed_level]; +} + static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf) { @@ -2682,6 +2736,69 @@ static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr, return 0; } +static void vega12_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics) +{ + memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0)); + + gpu_metrics->common_header.structure_size = + sizeof(struct gpu_metrics_v1_0); + gpu_metrics->common_header.format_revision = 1; + gpu_metrics->common_header.content_revision = 0; + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); +} + +static ssize_t vega12_get_gpu_metrics(struct pp_hwmgr *hwmgr, + void **table) +{ + struct vega12_hwmgr *data = + (struct vega12_hwmgr *)(hwmgr->backend); + struct gpu_metrics_v1_0 *gpu_metrics = + &data->gpu_metrics_table; + SmuMetrics_t metrics; + uint32_t fan_speed_rpm; + int ret; + + ret = vega12_get_metrics_table(hwmgr, &metrics, true); + if (ret) + return ret; + + vega12_init_gpu_metrics_v1_0(gpu_metrics); + + 
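The get_metrics_table() rework above (vega20 receives the identical change below) turns the helper into a small cache: refresh from the SMU when the copy is stale or when bypass_cache demands it, otherwise serve the cached copy — and passing NULL lets a caller refresh without copying. A self-contained sketch of that shape; read_hw() stands in for smum_smc_table_manager() and the 256-byte buffer for SmuMetrics_t:

#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/types.h>

struct metrics_cache {
	unsigned long ts;	/* jiffies of last refresh; 0 = never */
	char data[256];		/* stands in for SmuMetrics_t */
};

static int get_metrics(struct metrics_cache *c, void *out, bool bypass,
		       int (*read_hw)(void *dst))
{
	if (bypass || !c->ts ||
	    time_after(jiffies, c->ts + msecs_to_jiffies(1))) {
		int ret = read_hw(c->data);

		if (ret)
			return ret;
		c->ts = jiffies;	/* cache refreshed; restamp */
	}
	if (out)			/* NULL means refresh-only */
		memcpy(out, c->data, sizeof(c->data));
	return 0;
}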
gpu_metrics->temperature_edge = metrics.TemperatureEdge; + gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; + gpu_metrics->temperature_mem = metrics.TemperatureHBM; + gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx; + gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem; + + gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; + gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; + + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency; + + gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK]; + gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK]; + gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; + + gpu_metrics->throttle_status = metrics.ThrottlerStatus; + + vega12_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm); + gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm; + + gpu_metrics->pcie_link_width = + vega12_get_current_pcie_link_width(hwmgr); + gpu_metrics->pcie_link_speed = + vega12_get_current_pcie_link_speed(hwmgr); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v1_0); +} + static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .backend_init = vega12_hwmgr_backend_init, .backend_fini = vega12_hwmgr_backend_fini, @@ -2739,6 +2856,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .get_ppfeature_status = vega12_get_ppfeature_status, .set_ppfeature_status = vega12_set_ppfeature_status, .set_mp1_state = vega12_set_mp1_state, + .get_gpu_metrics = vega12_get_gpu_metrics, }; int vega12_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h similarity index 99% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h index 73875399666a826ff51d976091471069306c820a..aa63ae41942d55161980dc3ceb20696679555f4b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h @@ -399,6 +399,7 @@ struct vega12_hwmgr { unsigned long metrics_time; SmuMetrics_t metrics_table; + struct gpu_metrics_v1_0 gpu_metrics_table; }; #define VEGA12_DPM2_NEAR_TDP_DEC 10 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_inc.h similarity index 97% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_inc.h index e6d9e84059e17f5573fe47fa9491d80bf24fef36..0d08c57d3bca4f6b7bc70189c162d3bf382f7764 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_inc.h @@ -34,7 +34,6 @@ #include "asic_reg/gc/gc_9_2_1_offset.h" #include "asic_reg/gc/gc_9_2_1_sh_mask.h" -#include "asic_reg/nbio/nbio_6_1_offset.h" #include "asic_reg/nbio/nbio_6_1_offset.h" #include "asic_reg/nbio/nbio_6_1_sh_mask.h" diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_pptable.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h diff --git 
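vega12_init_gpu_metrics_v1_0() above fills the table with 0xFF before populating it, so any field the code never writes reads back as all-ones rather than a plausible zero, and stamps a common header with the structure size and format/content revisions. A hypothetical reader-side check built on just those header fields — the struct here mirrors only what the patch initializes; the real header type lives in the shared pm headers:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct metrics_header {
	uint16_t structure_size;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

static bool metrics_blob_is_v1_0(const void *blob, size_t len)
{
	const struct metrics_header *h = blob;

	return len >= sizeof(*h) &&
	       h->structure_size >= sizeof(*h) &&
	       h->structure_size <= len &&
	       h->format_revision == 1 &&
	       h->content_revision == 0;
}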
a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c similarity index 99% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c index 195d8539fbb431c8837e660cbe2522d11311da1f..740e2fc7a03455a8a73278d8d7e47afed6dbeb8e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c @@ -252,7 +252,7 @@ static int init_powerplay_table_information( phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockMax, ATOM_VEGA12_PPCLOCK_COUNT); phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockMin, ATOM_VEGA12_PPCLOCK_COUNT); - pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL); + pptable_information->smc_pptable = kmalloc(sizeof(PPTable_t), GFP_KERNEL); if (pptable_information->smc_pptable == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c similarity index 94% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c index c15b9756025d9c523bb789195b98784bc3c5b51d..7ace439dcde7ace6313a54d296b8247391c17349 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c @@ -170,17 +170,18 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; struct amdgpu_device *adev = hwmgr->adev; - int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP; + int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; - if (low < range->min) - low = range->min; - if (high > range->max) - high = range->max; + /* compare them in unit celsius degree */ + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + if (high > pptable_information->us_software_shutdown_temp) + high = pptable_information->us_software_shutdown_temp; if (low > high) return -EINVAL; @@ -189,8 +190,8 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); val = val 
& (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c similarity index 96% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 3b8839641770bef71f9e2f39db60acba86845ecb..da84012b7fd5167c5883fb57ad164883c9b0f639 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -55,6 +55,11 @@ #define smnPCIE_LC_SPEED_CNTL 0x11140290 #define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 +#define LINK_WIDTH_MAX 6 +#define LINK_SPEED_MAX 3 +static int link_width[] = {0, 1, 2, 4, 8, 12, 16}; +static int link_speed[] = {25, 50, 80, 160}; + static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = @@ -484,7 +489,7 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); int ret = 0; - bool use_baco = (adev->in_gpu_reset && + bool use_baco = (amdgpu_in_reset(adev) && (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || (adev->in_runpm && amdgpu_asic_supports_baco(adev)); @@ -979,10 +984,7 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); - uint64_t features_enabled; - int i; - bool enabled; - int ret = 0; + int i, ret = 0; PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, @@ -990,17 +992,8 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) "[DisableAllSMUFeatures] Failed to disable all smu features!", return ret); - ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); - PP_ASSERT_WITH_CODE(!ret, - "[DisableAllSMUFeatures] Failed to get enabled smc features!", - return ret); - - for (i = 0; i < GNLD_FEATURES_MAX; i++) { - enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? 
- true : false; - data->smu_features[i].enabled = enabled; - data->smu_features[i].supported = enabled; - } + for (i = 0; i < GNLD_FEATURES_MAX; i++) + data->smu_features[i].enabled = 0; return 0; } @@ -1652,12 +1645,6 @@ static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr) data->uvd_power_gated = true; data->vce_power_gated = true; - - if (data->smu_features[GNLD_DPM_UVD].enabled) - data->uvd_power_gated = false; - - if (data->smu_features[GNLD_DPM_VCE].enabled) - data->vce_power_gated = false; } static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) @@ -2085,22 +2072,29 @@ static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) return (mem_clk * 100); } -static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table) +static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, + SmuMetrics_t *metrics_table, + bool bypass_cache) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); int ret = 0; - if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) { - ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table, - TABLE_SMU_METRICS, true); + if (bypass_cache || + !data->metrics_time || + time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) { + ret = smum_smc_table_manager(hwmgr, + (uint8_t *)(&data->metrics_table), + TABLE_SMU_METRICS, + true); if (ret) { pr_info("Failed to export SMU metrics table!\n"); return ret; } - memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t)); data->metrics_time = jiffies; - } else + } + + if (metrics_table) memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t)); return ret; @@ -2112,7 +2106,7 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, int ret = 0; SmuMetrics_t metrics_table; - ret = vega20_get_metrics_table(hwmgr, &metrics_table); + ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); if (ret) return ret; @@ -2150,7 +2144,7 @@ static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr, int ret = 0; SmuMetrics_t metrics_table; - ret = vega20_get_metrics_table(hwmgr, &metrics_table); + ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); if (ret) return ret; @@ -2180,7 +2174,7 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - ret = vega20_get_metrics_table(hwmgr, &metrics_table); + ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); if (ret) return ret; @@ -2205,7 +2199,7 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 4; break; case AMDGPU_PP_SENSOR_EDGE_TEMP: - ret = vega20_get_metrics_table(hwmgr, &metrics_table); + ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); if (ret) return ret; @@ -2214,7 +2208,7 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 4; break; case AMDGPU_PP_SENSOR_MEM_TEMP: - ret = vega20_get_metrics_table(hwmgr, &metrics_table); + ret = vega20_get_metrics_table(hwmgr, &metrics_table, false); if (ret) return ret; @@ -3230,10 +3224,11 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks) { - uint64_t features_enabled; - uint64_t features_to_enable; - uint64_t features_to_disable; - int ret = 0; + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + uint64_t features_enabled, features_to_enable, features_to_disable; + int i, ret = 0; + bool enabled; if (new_ppfeature_masks >= (1ULL << 
GNLD_FEATURES_MAX)) return -EINVAL; @@ -3262,9 +3257,60 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return ret; } + /* Update the cached feature enablement state */ + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); + if (ret) + return ret; + + for (i = 0; i < GNLD_FEATURES_MAX; i++) { + enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? + true : false; + data->smu_features[i].enabled = enabled; + } + return 0; } +static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & + PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) + >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; +} + +static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr) +{ + uint32_t width_level; + + width_level = vega20_get_current_pcie_link_width_level(hwmgr); + if (width_level > LINK_WIDTH_MAX) + width_level = 0; + + return link_width[width_level]; +} + +static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) + >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; +} + +static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr) +{ + uint32_t speed_level; + + speed_level = vega20_get_current_pcie_link_speed_level(hwmgr); + if (speed_level > LINK_SPEED_MAX) + speed_level = 0; + + return link_speed[speed_level]; +} + static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf) { @@ -3277,7 +3323,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, struct phm_ppt_v3_information *pptable_information = (struct phm_ppt_v3_information *)hwmgr->pptable; PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable; - struct amdgpu_device *adev = hwmgr->adev; struct pp_clock_levels_with_latency clocks; struct vega20_single_dpm_table *fclk_dpm_table = &(data->dpm_table.fclk_table); @@ -3371,12 +3416,10 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case PP_PCIE: - current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & - PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) - >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; - current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & - PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) - >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; + current_gen_speed = + vega20_get_current_pcie_link_speed_level(hwmgr); + current_lane_width = + vega20_get_current_pcie_link_width_level(hwmgr); for (i = 0; i < NUM_LINK_LEVELS; i++) { if (i == 1 && data->pcie_parameters_override) { gen_speed = data->pcie_gen_level1; @@ -4218,6 +4261,72 @@ static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr, return ret; } +static void vega20_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics) +{ + memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0)); + + gpu_metrics->common_header.structure_size = + sizeof(struct gpu_metrics_v1_0); + gpu_metrics->common_header.format_revision = 1; + gpu_metrics->common_header.content_revision = 0; + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); +} + +static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr, + void **table) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + struct gpu_metrics_v1_0 *gpu_metrics = + 
&data->gpu_metrics_table; + SmuMetrics_t metrics; + uint32_t fan_speed_rpm; + int ret; + + ret = vega20_get_metrics_table(hwmgr, &metrics, true); + if (ret) + return ret; + + vega20_init_gpu_metrics_v1_0(gpu_metrics); + + gpu_metrics->temperature_edge = metrics.TemperatureEdge; + gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; + gpu_metrics->temperature_mem = metrics.TemperatureHBM; + gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx; + gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc; + gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0; + + gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; + gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; + + gpu_metrics->average_socket_power = metrics.AverageSocketPower; + + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency; + + gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK]; + gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK]; + gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; + + gpu_metrics->throttle_status = metrics.ThrottlerStatus; + + vega20_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm); + gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm; + + gpu_metrics->pcie_link_width = + vega20_get_current_pcie_link_width(hwmgr); + gpu_metrics->pcie_link_speed = + vega20_get_current_pcie_link_speed(hwmgr); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v1_0); +} + static const struct pp_hwmgr_func vega20_hwmgr_funcs = { /* init/fini related */ .backend_init = vega20_hwmgr_backend_init, @@ -4288,6 +4397,7 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { .smu_i2c_bus_access = vega20_smu_i2c_bus_access, .set_df_cstate = vega20_set_df_cstate, .set_xgmi_pstate = vega20_set_xgmi_pstate, + .get_gpu_metrics = vega20_get_gpu_metrics, }; int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h similarity index 99% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h index 2c3125f82b24acf2f6e6c7698694e88f3ee8a640..075c0094da9c0aee7ba889b23b8e8c4596984b8d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h @@ -527,6 +527,7 @@ struct vega20_hwmgr { unsigned long metrics_time; SmuMetrics_t metrics_table; + struct gpu_metrics_v1_0 gpu_metrics_table; bool pcie_parameters_override; uint32_t pcie_gen_level1; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_inc.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_inc.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.c diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.h similarity index 100% 
rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c similarity index 99% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c index 7a7f15d0c53afaacb70df7df97fa983e4ad35715..1f90825394575da09af05cfbfe7063784b9002a4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c @@ -890,14 +890,12 @@ static int init_powerplay_table_information( power_saving_clock_count); } - pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL); + pptable_information->smc_pptable = kmemdup(&(powerplay_table->smcPPTable), + sizeof(PPTable_t), + GFP_KERNEL); if (pptable_information->smc_pptable == NULL) return -ENOMEM; - memcpy(pptable_information->smc_pptable, - &(powerplay_table->smcPPTable), - sizeof(PPTable_t)); - result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable)); if (result) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c similarity index 94% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c index 7add2f60f49c4094ffac2603858e8a9d3b8712dd..364162ddaa9c672d023531166f7f8d3683e6d1b8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c @@ -240,17 +240,18 @@ int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; struct amdgpu_device *adev = hwmgr->adev; - int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP; + int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; - if (low < range->min) - low = range->min; - if (high > range->max) - high = range->max; + /* compare them in units of degrees Celsius */ + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + if (high > pptable_information->us_software_shutdown_temp) + high = pptable_information->us_software_shutdown_temp; if (low > high) return -EINVAL; @@ -259,8 +260,8 @@ static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL,
THERM_IH_HW_ENA, 1); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h rename to drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c similarity index 100% rename from drivers/gpu/drm/amd/amdgpu/kv_dpm.c rename to drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h similarity index 100% rename from drivers/gpu/drm/amd/amdgpu/kv_dpm.h rename to drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h diff --git a/drivers/gpu/drm/amd/amdgpu/kv_smc.c b/drivers/gpu/drm/amd/pm/powerplay/kv_smc.c similarity index 100% rename from drivers/gpu/drm/amd/amdgpu/kv_smc.c rename to drivers/gpu/drm/amd/pm/powerplay/kv_smc.c diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/ppsmc.h similarity index 100% rename from drivers/gpu/drm/amd/amdgpu/ppsmc.h rename to drivers/gpu/drm/amd/pm/powerplay/ppsmc.h diff --git a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h similarity index 100% rename from drivers/gpu/drm/amd/amdgpu/r600_dpm.h rename to drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c similarity index 99% rename from drivers/gpu/drm/amd/amdgpu/si_dpm.c rename to drivers/gpu/drm/amd/pm/powerplay/si_dpm.c index ea914b256ebd6c41a0d8c11751eddd6974655a62..b5986d19dc08b576a832ffb807b8f38482323d0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c @@ -6196,7 +6196,7 @@ static void si_request_link_speed_change_before_state_change(struct amdgpu_devic si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2; if (current_link_speed == AMDGPU_PCIE_GEN2) break; - /* fall through */ + fallthrough; case AMDGPU_PCIE_GEN2: if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) break; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h similarity index 100% rename from drivers/gpu/drm/amd/amdgpu/si_dpm.h rename to drivers/gpu/drm/amd/pm/powerplay/si_dpm.h diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/pm/powerplay/si_smc.c similarity index 100% rename from drivers/gpu/drm/amd/amdgpu/si_smc.c rename to drivers/gpu/drm/amd/pm/powerplay/si_smc.c diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h similarity index 100% rename from drivers/gpu/drm/amd/amdgpu/sislands_smc.h rename to drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/pm/powerplay/smumgr/Makefile similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/Makefile rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/Makefile diff --git 
a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c similarity index 99% rename from drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c index c18169aa59ce5bd117a1d82ac2ac00dd5af8ca31..e4d1f3d66ef4888fe4a9d2add1fe240db14731d0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c @@ -37,6 +37,7 @@ #include "cgs_common.h" #include "atombios.h" #include "pppcielanes.h" +#include "smu7_smumgr.h" #include "smu/smu_7_0_1_d.h" #include "smu/smu_7_0_1_sh_mask.h" @@ -2948,6 +2949,7 @@ const struct pp_smumgr_func ci_smu_funcs = { .request_smu_load_specific_fw = NULL, .send_msg_to_smc = ci_send_msg_to_smc, .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter, + .get_argument = smu7_get_argument, .download_pptable_settings = NULL, .upload_pptable_settings = NULL, .get_offsetof = ci_get_offsetof, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c 
b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c 
b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h rename to drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.h diff --git a/drivers/gpu/drm/amd/pm/swsmu/Makefile b/drivers/gpu/drm/amd/pm/swsmu/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..6f281990b7b4b0e1949497c486a099ead245be8e --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/Makefile @@ -0,0 +1,36 @@ +# +# Copyright 2020 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. 
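For reference, the metrics caching that vega20_get_metrics_table() above and the smu_cmn_get_metrics_table_locked() calls further down both implement reduces to the pattern below. This is a minimal sketch, not the committed code: fetch_from_smu() is a hypothetical stand-in for the real SMU table export (smum_smc_table_manager() on the hwmgr side, smu_cmn_update_table() on the swsmu side), and locking is left to the caller as in the _locked helper.

	/* Cached SMU metrics snapshot, refreshed at most once per millisecond. */
	struct metrics_cache {
		unsigned long stamp;	/* jiffies of last refresh, 0 = never filled */
		SmuMetrics_t table;	/* last snapshot exported by the SMU */
	};

	static int get_metrics_cached(struct metrics_cache *c, SmuMetrics_t *out,
				      bool bypass_cache,
				      int (*fetch_from_smu)(SmuMetrics_t *))
	{
		int ret;

		/* Refresh when forced, never filled, or older than 1 ms. */
		if (bypass_cache || !c->stamp ||
		    time_after(jiffies, c->stamp + msecs_to_jiffies(1))) {
			ret = fetch_from_smu(&c->table);
			if (ret)
				return ret;
			c->stamp = jiffies;
		}

		/* A NULL destination just refreshes the cache. */
		if (out)
			memcpy(out, &c->table, sizeof(*out));

		return 0;
	}

A one-shot consumer such as the gpu_metrics export passes bypass_cache = true so sysfs readers always see fresh data, while the hot sensor paths pass false and amortize one SMU transaction across every field read within the 1 ms window.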
+# + +AMD_SWSMU_PATH = ../pm/swsmu + +SWSMU_LIBS = smu11 smu12 + +AMD_SWSMU = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/swsmu/,$(SWSMU_LIBS))) + +include $(AMD_SWSMU) + +SWSMU_MGR = amdgpu_smu.o \ + smu_cmn.o \ + +AMD_SWSMU_POWER = $(addprefix $(AMD_SWSMU_PATH)/,$(SWSMU_MGR)) + +AMD_POWERPLAY_FILES += $(AMD_SWSMU_POWER) diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c similarity index 97% rename from drivers/gpu/drm/amd/powerplay/amdgpu_smu.c rename to drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 0826625573dcea21e57e913ba778f3d1b9b382b3..7a55ece1f1246f32ae4423395116b8fca6208945 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -504,6 +504,8 @@ static int smu_late_init(void *handle) smu_get_unique_id(smu); + smu_get_fan_parameters(smu); + smu_handle_task(&adev->smu, smu->smu_dpm.dpm_level, AMD_PP_TASK_COMPLETE_INIT, @@ -756,6 +758,7 @@ static int smu_sw_init(void *handle) mutex_init(&smu->message_lock); INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); + atomic64_set(&smu->throttle_int_counter, 0); smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; @@ -1109,7 +1112,7 @@ static int smu_disable_dpms(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret = 0; bool use_baco = !smu->is_apu && - ((adev->in_gpu_reset && + ((amdgpu_in_reset(adev) && (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev))); @@ -1439,6 +1442,8 @@ static int smu_enable_umd_pstate(void *handle, amdgpu_device_ip_set_clockgating_state(smu->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_UNGATE); + smu_gfx_ulv_control(smu, false); + smu_deep_sleep_control(smu, false); } } else { /* exit umd pstate, restore level, enable gfx cg*/ @@ -1446,6 +1451,8 @@ static int smu_enable_umd_pstate(void *handle, if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) *level = smu_dpm_ctx->saved_dpm_level; smu_dpm_ctx->enable_umd_pstate = false; + smu_deep_sleep_control(smu, true); + smu_gfx_ulv_control(smu, true); amdgpu_device_ip_set_clockgating_state(smu->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE); @@ -2185,31 +2192,44 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value) int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) { int ret = 0; + uint32_t percent; + uint32_t current_rpm; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_fan_speed_percent) - ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed); + if (smu->ppt_funcs->get_fan_speed_rpm) { + ret = smu->ppt_funcs->get_fan_speed_rpm(smu, ¤t_rpm); + if (!ret) { + percent = current_rpm * 100 / smu->fan_max_rpm; + *speed = percent > 100 ? 
100 : percent; + } + } mutex_unlock(&smu->mutex); + return ret; } int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) { int ret = 0; + uint32_t rpm; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->set_fan_speed_percent) - ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed); + if (smu->ppt_funcs->set_fan_speed_rpm) { + if (speed > 100) + speed = 100; + rpm = speed * smu->fan_max_rpm / 100; + ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm); + } mutex_unlock(&smu->mutex); @@ -2631,3 +2651,40 @@ int smu_get_dpm_clock_table(struct smu_context *smu, return ret; } + +ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + ssize_t size; + + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + return -EOPNOTSUPP; + + if (!smu->ppt_funcs->get_gpu_metrics) + return -EOPNOTSUPP; + + mutex_lock(&smu->mutex); + + size = smu->ppt_funcs->get_gpu_metrics(smu, table); + + mutex_unlock(&smu->mutex); + + return size; +} + +int smu_enable_mgpu_fan_boost(struct smu_context *smu) +{ + int ret = 0; + + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + return -EOPNOTSUPP; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->enable_mgpu_fan_boost) + ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu11/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..f98d97192635b30059108048e83d64e823ddf034 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/Makefile @@ -0,0 +1,33 @@ +# +# Copyright 2020 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# +# Makefile for the 'smu manager' sub-component of powerplay. +# It provides the smu management services for the driver. 
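The percent-based fan interfaces above are rebuilt on top of the RPM ops plus the fan_max_rpm value cached by the new get_fan_parameters() callbacks. A minimal sketch of that conversion, assuming fan_max_rpm has already been populated (non-zero) by get_fan_parameters(); the helper names are illustrative, not from the patch:

	/* RPM -> percent, clamped to 100 as smu_get_fan_speed_percent() does. */
	static uint32_t fan_rpm_to_percent(uint32_t rpm, uint32_t fan_max_rpm)
	{
		uint32_t percent = rpm * 100 / fan_max_rpm;

		return percent > 100 ? 100 : percent;
	}

	/* Percent -> RPM, clamping the input as smu_set_fan_speed_percent() does. */
	static uint32_t fan_percent_to_rpm(uint32_t percent, uint32_t fan_max_rpm)
	{
		if (percent > 100)
			percent = 100;

		return percent * fan_max_rpm / 100;
	}

Keeping only the RPM ops in the per-ASIC tables and deriving percentages in the common layer is what lets the get_fan_speed_percent/set_fan_speed_percent callbacks be dropped from the arcturus, navi10 and sienna_cichlid pptable_funcs later in this patch.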
+ +SMU11_MGR = arcturus_ppt.o \ + navi10_ppt.o \ + sienna_cichlid_ppt.o \ + smu_v11_0.o + +AMD_SWSMU_SMU11MGR = $(addprefix $(AMD_SWSMU_PATH)/smu11/,$(SMU11_MGR)) + +AMD_POWERPLAY_FILES += $(AMD_SWSMU_SMU11MGR) diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c similarity index 95% rename from drivers/gpu/drm/amd/powerplay/arcturus_ppt.c rename to drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 6c991de8f3712a5767388d198d8245b87ce51292..2ce6ad9c6609454734246e9653b3828d9cd18bce 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -79,6 +79,8 @@ /* possible frequency drift (1Mhz) */ #define EPSILON 1 +#define smnPCIE_ESM_CTRL 0x111003D0 + static const struct cmn2asic_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -234,6 +236,13 @@ static int arcturus_tables_init(struct smu_context *smu) return -ENOMEM; smu_table->metrics_time = 0; + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0); + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); + if (!smu_table->gpu_metrics_table) { + kfree(smu_table->metrics_table); + return -ENOMEM; + } + return 0; } @@ -542,19 +551,12 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu, mutex_lock(&smu->metrics_lock); - if (!smu_table->metrics_time || - time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) { - ret = smu_cmn_update_table(smu, - SMU_TABLE_SMU_METRICS, - 0, - smu_table->metrics_table, - false); - if (ret) { - dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n"); - mutex_unlock(&smu->metrics_lock); - return ret; - } - smu_table->metrics_time = jiffies; + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + false); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; } switch (member) { @@ -897,9 +899,10 @@ static int arcturus_force_clk_levels(struct smu_context *smu, return ret; } - if (smu_version >= 0x361200) { + if ((smu_version >= 0x361200) && + (smu_version <= 0x361a00)) { dev_err(smu->adev->dev, "Forcing clock level is not supported with " - "54.18 and onwards SMU firmwares\n"); + "54.18 - 54.26(included) SMU firmwares\n"); return -EOPNOTSUPP; } @@ -1120,29 +1123,23 @@ static int arcturus_get_fan_speed_rpm(struct smu_context *smu, if (!speed) return -EINVAL; - return arcturus_get_smu_metrics_data(smu, - METRICS_CURR_FANSPEED, - speed); + switch (smu_v11_0_get_fan_control_mode(smu)) { + case AMD_FAN_CTRL_AUTO: + return arcturus_get_smu_metrics_data(smu, + METRICS_CURR_FANSPEED, + speed); + default: + return smu_v11_0_get_fan_speed_rpm(smu, speed); + } } -static int arcturus_get_fan_speed_percent(struct smu_context *smu, - uint32_t *speed) +static int arcturus_get_fan_parameters(struct smu_context *smu) { PPTable_t *pptable = smu->smu_table.driver_pptable; - uint32_t percent, current_rpm; - int ret = 0; - if (!speed) - return -EINVAL; + smu->fan_max_rpm = pptable->FanMaximumRpm; - ret = arcturus_get_fan_speed_rpm(smu, ¤t_rpm); - if (ret) - return ret; - - percent = current_rpm * 100 / pptable->FanMaximumRpm; - *speed = percent > 100 ? 
100 : percent; - - return ret; + return 0; } static int arcturus_get_power_limit(struct smu_context *smu) @@ -1392,9 +1389,10 @@ static int arcturus_set_performance_level(struct smu_context *smu, case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - if (smu_version >= 0x361200) { + if ((smu_version >= 0x361200) && + (smu_version <= 0x361a00)) { dev_err(smu->adev->dev, "Forcing clock level is not supported with " - "54.18 and onwards SMU firmwares\n"); + "54.18 - 54.26(included) SMU firmwares\n"); return -EOPNOTSUPP; } break; @@ -1840,10 +1838,14 @@ static bool arcturus_is_dpm_running(struct smu_context *smu) { int ret = 0; uint32_t feature_mask[2]; - unsigned long feature_enabled; + uint64_t feature_enabled; + ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2); - feature_enabled = (unsigned long)((uint64_t)feature_mask[0] | - ((uint64_t)feature_mask[1] << 32)); + if (ret) + return false; + + feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0]; + return !!(feature_enabled & SMC_DPM_FEATURE); } @@ -2204,14 +2206,17 @@ static const struct throttling_logging_label { }; static void arcturus_log_thermal_throttling_event(struct smu_context *smu) { + int ret; int throttler_idx, throtting_events = 0, buf_idx = 0; struct amdgpu_device *adev = smu->adev; uint32_t throttler_status; char log_buf[256]; - arcturus_get_smu_metrics_data(smu, - METRICS_THROTTLER_STATUS, - &throttler_status); + ret = arcturus_get_smu_metrics_data(smu, + METRICS_THROTTLER_STATUS, + &throttler_status); + if (ret) + return; memset(log_buf, 0, sizeof(log_buf)); for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label); @@ -2233,6 +2238,77 @@ static void arcturus_log_thermal_throttling_event(struct smu_context *smu) dev_warn(adev->dev, "WARN: GPU thermal throttling temperature reached, expect performance decrease. 
%s.\n", log_buf); + kgd2kfd_smi_event_throttle(smu->adev->kfd.dev, throttler_status); +} + +static int arcturus_get_current_pcie_link_speed(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t esm_ctrl; + + /* TODO: confirm this on real target */ + esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL); + if ((esm_ctrl >> 15) & 0x1FFFF) + return (((esm_ctrl >> 8) & 0x3F) + 128); + + return smu_v11_0_get_current_pcie_link_speed(smu); +} + +static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_0 *gpu_metrics = + (struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table; + SmuMetrics_t metrics; + int ret = 0; + + ret = smu_cmn_get_metrics_table(smu, + &metrics, + true); + if (ret) + return ret; + + smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics); + + gpu_metrics->temperature_edge = metrics.TemperatureEdge; + gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; + gpu_metrics->temperature_mem = metrics.TemperatureHBM; + gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx; + gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc; + gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem; + + gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; + gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; + gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage; + + gpu_metrics->average_socket_power = metrics.AverageSocketPower; + gpu_metrics->energy_accumulator = metrics.EnergyAccumulator; + + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency; + gpu_metrics->average_vclk0_frequency = metrics.AverageVclkFrequency; + gpu_metrics->average_dclk0_frequency = metrics.AverageDclkFrequency; + + gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK]; + gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK]; + gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; + + gpu_metrics->throttle_status = metrics.ThrottlerStatus; + + gpu_metrics->current_fan_speed = metrics.CurrFanSpeed; + + gpu_metrics->pcie_link_width = + smu_v11_0_get_current_pcie_link_width(smu); + gpu_metrics->pcie_link_speed = + arcturus_get_current_pcie_link_speed(smu); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v1_0); } static const struct pptable_funcs arcturus_ppt_funcs = { @@ -2247,7 +2323,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .print_clk_levels = arcturus_print_clk_levels, .force_clk_levels = arcturus_force_clk_levels, .read_sensor = arcturus_read_sensor, - .get_fan_speed_percent = arcturus_get_fan_speed_percent, .get_fan_speed_rpm = arcturus_get_fan_speed_rpm, .get_power_profile_mode = arcturus_get_power_profile_mode, .set_power_profile_mode = arcturus_set_power_profile_mode, @@ -2293,7 +2368,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, - .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, .gfx_off_control = 
smu_v11_0_gfx_off_control, @@ -2312,6 +2386,10 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .log_thermal_throttling_event = arcturus_log_thermal_throttling_event, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, + .get_gpu_metrics = arcturus_get_gpu_metrics, + .gfx_ulv_control = smu_v11_0_gfx_ulv_control, + .deep_sleep_control = smu_v11_0_deep_sleep_control, + .get_fan_parameters = arcturus_get_fan_parameters, }; void arcturus_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/arcturus_ppt.h rename to drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.h diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c similarity index 93% rename from drivers/gpu/drm/amd/powerplay/navi10_ppt.c rename to drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 9f62af9abd23289f88fb9a2800a40c8f3f69d04c..42d53cca73600a0880e62abd885667ba9927e7fd 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -138,6 +138,7 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange, 0), MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm, 0), MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0), + MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), }; static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = { @@ -456,13 +457,18 @@ static int navi10_tables_init(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; + struct amdgpu_device *adev = smu->adev; SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + if (adev->asic_type == CHIP_NAVI12) + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_NV12_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + else + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), @@ -473,16 +479,30 @@ static int navi10_tables_init(struct smu_context *smu) sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + smu_table->metrics_table = kzalloc(adev->asic_type == CHIP_NAVI12 ? 
+ sizeof(SmuMetrics_NV12_t) : + sizeof(SmuMetrics_t), GFP_KERNEL); if (!smu_table->metrics_table) - return -ENOMEM; + goto err0_out; smu_table->metrics_time = 0; + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0); + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); + if (!smu_table->gpu_metrics_table) + goto err1_out; + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); if (!smu_table->watermarks_table) - return -ENOMEM; + goto err2_out; return 0; + +err2_out: + kfree(smu_table->gpu_metrics_table); +err1_out: + kfree(smu_table->metrics_table); +err0_out: + return -ENOMEM; } static int navi10_get_smu_metrics_data(struct smu_context *smu, @@ -490,23 +510,22 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu, uint32_t *value) { struct smu_table_context *smu_table= &smu->smu_table; + /* + * This works for NV12 as well. Although NV12 uses a different + * SmuMetrics structure from other NV1X ASICs, they share the + * same offsets for the leading members (those used here). + */ SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; mutex_lock(&smu->metrics_lock); - if (!smu_table->metrics_time || - time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) { - ret = smu_cmn_update_table(smu, - SMU_TABLE_SMU_METRICS, - 0, - smu_table->metrics_table, - false); - if (ret) { - dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n"); - mutex_unlock(&smu->metrics_lock); - return ret; - } - smu_table->metrics_time = jiffies; + + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + false); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; } switch (member) { @@ -909,7 +928,6 @@ static int navi10_print_clk_levels(struct smu_context *smu, uint32_t gen_speed, lane_width; struct smu_dpm_context *smu_dpm = &smu->smu_dpm; struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context; - struct amdgpu_device *adev = smu->adev; PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable; OverDriveTable_t *od_table = (OverDriveTable_t *)table_context->overdrive_table; @@ -963,12 +981,8 @@ static int navi10_print_clk_levels(struct smu_context *smu, } break; case SMU_PCIE: - gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & - PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) - >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; - lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & - PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) - >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; + gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu); + lane_width = smu_v11_0_get_current_pcie_link_width_level(smu); for (i = 0; i < NUM_LINK_LEVELS; i++) size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ?
"2.5GT/s," : @@ -1331,10 +1345,14 @@ static bool navi10_is_dpm_running(struct smu_context *smu) { int ret = 0; uint32_t feature_mask[2]; - unsigned long feature_enabled; + uint64_t feature_enabled; + ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2); - feature_enabled = (unsigned long)((uint64_t)feature_mask[0] | - ((uint64_t)feature_mask[1] << 32)); + if (ret) + return false; + + feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0]; + return !!(feature_enabled & SMC_DPM_FEATURE); } @@ -1344,27 +1362,23 @@ static int navi10_get_fan_speed_rpm(struct smu_context *smu, if (!speed) return -EINVAL; - return navi10_get_smu_metrics_data(smu, - METRICS_CURR_FANSPEED, - speed); + switch (smu_v11_0_get_fan_control_mode(smu)) { + case AMD_FAN_CTRL_AUTO: + return navi10_get_smu_metrics_data(smu, + METRICS_CURR_FANSPEED, + speed); + default: + return smu_v11_0_get_fan_speed_rpm(smu, speed); + } } -static int navi10_get_fan_speed_percent(struct smu_context *smu, - uint32_t *speed) +static int navi10_get_fan_parameters(struct smu_context *smu) { - int ret = 0; - uint32_t percent = 0; - uint32_t current_rpm; PPTable_t *pptable = smu->smu_table.driver_pptable; - ret = navi10_get_fan_speed_rpm(smu, ¤t_rpm); - if (ret) - return ret; - - percent = current_rpm * 100 / pptable->FanMaximumRpm; - *speed = percent > 100 ? 100 : percent; + smu->fan_max_rpm = pptable->FanMaximumRpm; - return ret; + return 0; } static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) @@ -2472,6 +2486,97 @@ static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter i2c_del_adapter(control); } +static ssize_t navi10_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_0 *gpu_metrics = + (struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table; + struct amdgpu_device *adev = smu->adev; + SmuMetrics_NV12_t nv12_metrics = { 0 }; + SmuMetrics_t metrics; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + true); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t)); + if (adev->asic_type == CHIP_NAVI12) + memcpy(&nv12_metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t)); + + mutex_unlock(&smu->metrics_lock); + + smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics); + + gpu_metrics->temperature_edge = metrics.TemperatureEdge; + gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; + gpu_metrics->temperature_mem = metrics.TemperatureMem; + gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx; + gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc; + gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0; + + gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; + gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; + + gpu_metrics->average_socket_power = metrics.AverageSocketPower; + + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency; + + if (adev->asic_type == CHIP_NAVI12) { + gpu_metrics->energy_accumulator = nv12_metrics.EnergyAccumulator; + gpu_metrics->average_vclk0_frequency = nv12_metrics.AverageVclkFrequency; + gpu_metrics->average_dclk0_frequency = nv12_metrics.AverageDclkFrequency; + gpu_metrics->average_mm_activity = 
nv12_metrics.VcnActivityPercentage; + } + + gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK]; + gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK]; + gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; + + gpu_metrics->throttle_status = metrics.ThrottlerStatus; + + gpu_metrics->current_fan_speed = metrics.CurrFanSpeed; + + gpu_metrics->pcie_link_width = + smu_v11_0_get_current_pcie_link_width(smu); + gpu_metrics->pcie_link_speed = + smu_v11_0_get_current_pcie_link_speed(smu); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v1_0); +} + +static int navi10_enable_mgpu_fan_boost(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t param = 0; + + /* Navi12 does not support this */ + if (adev->asic_type == CHIP_NAVI12) + return 0; + + /* Workaround for WS SKU */ + if (adev->pdev->device == 0x7312 && + adev->pdev->revision == 0) + param = 0xD188; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetMGpuFanBoostLimitRpm, + param, + NULL); +} static const struct pptable_funcs navi10_ppt_funcs = { .get_allowed_feature_mask = navi10_get_allowed_feature_mask, @@ -2488,7 +2593,6 @@ static const struct pptable_funcs navi10_ppt_funcs = { .display_config_changed = navi10_display_config_changed, .notify_smc_display_config = navi10_notify_smc_display_config, .is_dpm_running = navi10_is_dpm_running, - .get_fan_speed_percent = navi10_get_fan_speed_percent, .get_fan_speed_rpm = navi10_get_fan_speed_rpm, .get_power_profile_mode = navi10_get_power_profile_mode, .set_power_profile_mode = navi10_set_power_profile_mode, @@ -2532,7 +2636,6 @@ static const struct pptable_funcs navi10_ppt_funcs = { .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, - .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, .gfx_off_control = smu_v11_0_gfx_off_control, @@ -2553,6 +2656,11 @@ static const struct pptable_funcs navi10_ppt_funcs = { .set_power_source = smu_v11_0_set_power_source, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, + .get_gpu_metrics = navi10_get_gpu_metrics, + .enable_mgpu_fan_boost = navi10_enable_mgpu_fan_boost, + .gfx_ulv_control = smu_v11_0_gfx_ulv_control, + .deep_sleep_control = smu_v11_0_deep_sleep_control, + .get_fan_parameters = navi10_get_fan_parameters, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.h similarity index 96% rename from drivers/gpu/drm/amd/powerplay/navi10_ppt.h rename to drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.h index 2abb4ba01db1a499c328506f75d78c084661b818..84dc5a1b683039ea8ec261c7a0a42f1a4db38f4f 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.h @@ -49,9 +49,6 @@ #define NAVI10_VOLTAGE_SCALE (4) -#define smnPCIE_LC_SPEED_CNTL 0x11140290 -#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 - extern void navi10_set_ppt_funcs(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c similarity index 95% rename from 
drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c rename to drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 3865dbed5f935b89237107b8235c5be49f170396..5c22611d5878b5a215078a10820a99a0f2bdd1a3 100644 --- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -68,7 +68,8 @@ FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ - FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)) + FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)) #define SMU_11_0_7_GFX_BUSY_THRESHOLD 15 @@ -95,6 +96,7 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), + MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), @@ -124,6 +126,7 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 0), MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), + MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), }; static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = { @@ -228,6 +231,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu, *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | FEATURE_MASK(FEATURE_DPM_FCLK_BIT) + | FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT) | FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT) | FEATURE_MASK(FEATURE_DS_FCLK_BIT) @@ -385,14 +389,26 @@ static int sienna_cichlid_tables_init(struct smu_context *smu) smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); if (!smu_table->metrics_table) - return -ENOMEM; + goto err0_out; smu_table->metrics_time = 0; + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0); + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); + if (!smu_table->gpu_metrics_table) + goto err1_out; + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); if (!smu_table->watermarks_table) - return -ENOMEM; + goto err2_out; return 0; + +err2_out: + kfree(smu_table->gpu_metrics_table); +err1_out: + kfree(smu_table->metrics_table); +err0_out: + return -ENOMEM; } static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu, @@ -404,19 +420,13 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu, int ret = 0; mutex_lock(&smu->metrics_lock); - if (!smu_table->metrics_time || - time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) { - ret = smu_cmn_update_table(smu, - SMU_TABLE_SMU_METRICS, - 0, - smu_table->metrics_table, - false); - if (ret) { - dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n"); - mutex_unlock(&smu->metrics_lock); - return ret; - } - smu_table->metrics_time = jiffies; + + ret = smu_cmn_get_metrics_table_locked(smu, + NULL, + false); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; } switch (member) { @@ -775,7 +785,7 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); if (ret) return ret; - if 
(adev->asic_type == CHIP_SIENNA_CICHLID) { + if (adev->vcn.num_vcn_inst > 1) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0x10000, NULL); if (ret) @@ -787,7 +797,7 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); if (ret) return ret; - if (adev->asic_type == CHIP_SIENNA_CICHLID) { + if (adev->vcn.num_vcn_inst > 1) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0x10000, NULL); if (ret) @@ -951,12 +961,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, } break; case SMU_PCIE: - gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & - PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) - >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; - lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & - PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) - >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; + gen_speed = smu_v11_0_get_current_pcie_link_speed(smu); + lane_width = smu_v11_0_get_current_pcie_link_width(smu); for (i = 0; i < NUM_LINK_LEVELS; i++) size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," : @@ -1146,10 +1152,14 @@ static bool sienna_cichlid_is_dpm_running(struct smu_context *smu) { int ret = 0; uint32_t feature_mask[2]; - unsigned long feature_enabled; + uint64_t feature_enabled; + ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2); - feature_enabled = (unsigned long)((uint64_t)feature_mask[0] | - ((uint64_t)feature_mask[1] << 32)); + if (ret) + return false; + + feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0]; + return !!(feature_enabled & SMC_DPM_FEATURE); } @@ -1159,27 +1169,23 @@ static int sienna_cichlid_get_fan_speed_rpm(struct smu_context *smu, if (!speed) return -EINVAL; - return sienna_cichlid_get_smu_metrics_data(smu, - METRICS_CURR_FANSPEED, - speed); + switch (smu_v11_0_get_fan_control_mode(smu)) { + case AMD_FAN_CTRL_AUTO: + return sienna_cichlid_get_smu_metrics_data(smu, + METRICS_CURR_FANSPEED, + speed); + default: + return smu_v11_0_get_fan_speed_rpm(smu, speed); + } } -static int sienna_cichlid_get_fan_speed_percent(struct smu_context *smu, - uint32_t *speed) +static int sienna_cichlid_get_fan_parameters(struct smu_context *smu) { - int ret = 0; - uint32_t percent = 0; - uint32_t current_rpm; PPTable_t *pptable = smu->smu_table.driver_pptable; - ret = sienna_cichlid_get_fan_speed_rpm(smu, ¤t_rpm); - if (ret) - return ret; - - percent = current_rpm * 100 / pptable->FanMaximumRpm; - *speed = percent > 100 ? 
100 : percent; + smu->fan_max_rpm = pptable->FanMaximumRpm; - return ret; + return 0; } static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *buf) @@ -1732,6 +1738,11 @@ static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu, return ret; } +static int sienna_cichlid_run_btc(struct smu_context *smu) +{ + return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); +} + static bool sienna_cichlid_is_baco_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -2644,6 +2655,76 @@ static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_ i2c_del_adapter(control); } +static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_0 *gpu_metrics = + (struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table; + SmuMetrics_t metrics; + int ret = 0; + + ret = smu_cmn_get_metrics_table(smu, + &metrics, + true); + if (ret) + return ret; + + smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics); + + gpu_metrics->temperature_edge = metrics.TemperatureEdge; + gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; + gpu_metrics->temperature_mem = metrics.TemperatureMem; + gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx; + gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc; + gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0; + + gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; + gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; + gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage; + + gpu_metrics->average_socket_power = metrics.AverageSocketPower; + gpu_metrics->energy_accumulator = metrics.EnergyAccumulator; + + if (metrics.AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD) + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs; + else + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs; + gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs; + gpu_metrics->average_vclk0_frequency = metrics.AverageVclk0Frequency; + gpu_metrics->average_dclk0_frequency = metrics.AverageDclk0Frequency; + gpu_metrics->average_vclk1_frequency = metrics.AverageVclk1Frequency; + gpu_metrics->average_dclk1_frequency = metrics.AverageDclk1Frequency; + + gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK]; + gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK_0]; + gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK_0]; + gpu_metrics->current_vclk1 = metrics.CurrClock[PPCLK_VCLK_1]; + gpu_metrics->current_dclk1 = metrics.CurrClock[PPCLK_DCLK_1]; + + gpu_metrics->throttle_status = metrics.ThrottlerStatus; + + gpu_metrics->current_fan_speed = metrics.CurrFanSpeed; + + gpu_metrics->pcie_link_width = + smu_v11_0_get_current_pcie_link_width(smu); + gpu_metrics->pcie_link_speed = + smu_v11_0_get_current_pcie_link_speed(smu); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v1_0); +} + +static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetMGpuFanBoostLimitRpm, + 0, + NULL); +} static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask, @@ -2659,7 +2740,6 @@ static const struct pptable_funcs 
sienna_cichlid_ppt_funcs = { .display_config_changed = sienna_cichlid_display_config_changed, .notify_smc_display_config = sienna_cichlid_notify_smc_display_config, .is_dpm_running = sienna_cichlid_is_dpm_running, - .get_fan_speed_percent = sienna_cichlid_get_fan_speed_percent, .get_fan_speed_rpm = sienna_cichlid_get_fan_speed_rpm, .get_power_profile_mode = sienna_cichlid_get_power_profile_mode, .set_power_profile_mode = sienna_cichlid_set_power_profile_mode, @@ -2703,7 +2783,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, - .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, .gfx_off_control = smu_v11_0_gfx_off_control, @@ -2719,8 +2798,14 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .mode1_reset = smu_v11_0_mode1_reset, .get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, + .run_btc = sienna_cichlid_run_btc, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, + .get_gpu_metrics = sienna_cichlid_get_gpu_metrics, + .enable_mgpu_fan_boost = sienna_cichlid_enable_mgpu_fan_boost, + .gfx_ulv_control = smu_v11_0_gfx_ulv_control, + .deep_sleep_control = smu_v11_0_deep_sleep_control, + .get_fan_parameters = sienna_cichlid_get_fan_parameters, }; void sienna_cichlid_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h similarity index 92% rename from drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.h rename to drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h index 8078886e4cbc5c8c2629036b87a91768707d3071..57e120c440eaeecb639ee6d2c23c54c6f12c2184 100644 --- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h @@ -31,7 +31,4 @@ typedef enum { extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu); -#define smnPCIE_LC_SPEED_CNTL 0x11140290 -#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 - #endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c similarity index 92% rename from drivers/gpu/drm/amd/powerplay/smu_v11_0.c rename to drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 7b950a582a28a9922b1f87a877ec1ba35ebd96ca..f5aeb0b5cf977874178f8a2e6d30490da9e90350 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -67,6 +67,19 @@ MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin"); #define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms +#define LINK_WIDTH_MAX 6 +#define LINK_SPEED_MAX 3 + +#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 +#define smnPCIE_LC_SPEED_CNTL 0x11140290 +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000 +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE + +static int link_width[] = {0, 1, 2, 4, 8, 12, 16}; +static int link_speed[] = {25, 50, 80, 160}; + int smu_v11_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -404,10 +417,12 @@ int 
smu_v11_0_fini_smc_tables(struct smu_context *smu) struct smu_table_context *smu_table = &smu->smu_table; struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + kfree(smu_table->gpu_metrics_table); kfree(smu_table->boot_overdrive_table); kfree(smu_table->overdrive_table); kfree(smu_table->max_sustainable_clocks); kfree(smu_table->driver_pptable); + smu_table->gpu_metrics_table = NULL; smu_table->boot_overdrive_table = NULL; smu_table->overdrive_table = NULL; smu_table->max_sustainable_clocks = NULL; @@ -1084,35 +1099,6 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode) return 0; } -int -smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) -{ - struct amdgpu_device *adev = smu->adev; - uint32_t duty100, duty; - uint64_t tmp64; - - if (speed > 100) - speed = 100; - - if (smu_v11_0_auto_fan_control(smu, 0)) - return -EINVAL; - - duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), - CG_FDO_CTRL1, FMAX_DUTY100); - if (!duty100) - return -EINVAL; - - tmp64 = (uint64_t)speed * duty100; - do_div(tmp64, 100); - duty = (uint32_t)tmp64; - - WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0, - REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0), - CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); - - return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC); -} - int smu_v11_0_set_fan_control_mode(struct smu_context *smu, uint32_t mode) @@ -1121,7 +1107,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu, switch (mode) { case AMD_FAN_CTRL_NONE: - ret = smu_v11_0_set_fan_speed_percent(smu, 100); + ret = smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm); break; case AMD_FAN_CTRL_MANUAL: ret = smu_v11_0_auto_fan_control(smu, 0); @@ -1167,6 +1153,27 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, return ret; } +int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t tach_period, crystal_clock_freq; + uint64_t tmp64; + + tach_period = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL), + CG_TACH_CTRL, TARGET_PERIOD); + if (!tach_period) + return -EINVAL; + + crystal_clock_freq = amdgpu_asic_get_xclk(adev); + + tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000; + do_div(tmp64, (tach_period * 8)); + *speed = (uint32_t)tmp64; + + return 0; +} + int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, uint32_t pstate) { @@ -1312,6 +1319,11 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev, smu_v11_0_ack_ac_dc_interrupt(&adev->smu); break; case 0x7: + /* + * Increment the throttle interrupt counter + */ + atomic64_inc(&smu->throttle_int_counter); + if (!atomic_read(&adev->throttling_logging_enabled)) return 0; @@ -1913,3 +1925,99 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu, return ret; } + +int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & + PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) + >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; +} + +int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu) +{ + uint32_t width_level; + + width_level = smu_v11_0_get_current_pcie_link_width_level(smu); + if (width_level > LINK_WIDTH_MAX) + width_level = 0; + + return link_width[width_level]; +} + +int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) + >> 
PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; +} + +int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu) +{ + uint32_t speed_level; + + speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu); + if (speed_level > LINK_SPEED_MAX) + speed_level = 0; + + return link_speed[speed_level]; +} + +void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics) +{ + memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0)); + + gpu_metrics->common_header.structure_size = + sizeof(struct gpu_metrics_v1_0); + gpu_metrics->common_header.format_revision = 1; + gpu_metrics->common_header.content_revision = 0; + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); +} + +int smu_v11_0_gfx_ulv_control(struct smu_context *smu, + bool enablement) +{ + int ret = 0; + + if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT)) + ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement); + + return ret; +} + +int smu_v11_0_deep_sleep_control(struct smu_context *smu, + bool enablement) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + + if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) { + ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement); + if (ret) { + dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable"); + return ret; + } + } + + if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) { + ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement); + if (ret) { + dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable"); + return ret; + } + } + + if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) { + ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement); + if (ret) { + dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable"); + return ret; + } + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu12/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..67e53f7da3ceab7d780e75bc4ae0d09e70faab6a --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/Makefile @@ -0,0 +1,31 @@ +# +# Copyright 2020 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# +# Makefile for the 'smu manager' sub-component of powerplay. +# It provides the smu management services for the driver. 
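+# +# Note: AMD_SWSMU_PATH is assumed to be provided by the parent swsmu +# Makefile; the objects listed below are built relative to that path.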
+ +SMU12_MGR = renoir_ppt.o \ + smu_v12_0.o + +AMD_SWSMU_SMU12MGR = $(addprefix $(AMD_SWSMU_PATH)/smu12/,$(SMU12_MGR)) + +AMD_POWERPLAY_FILES += $(AMD_SWSMU_SMU12MGR) diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c similarity index 89% rename from drivers/gpu/drm/amd/powerplay/renoir_ppt.c rename to drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index dbb676c482fde957fcaf86c2cfcefadafb4a8b24..3b9ac72c7571d6de2e9063e9530dbcd0b9d88824 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -128,30 +128,6 @@ static struct cmn2asic_mapping renoir_workload_map[PP_SMC_POWER_PROFILE_COUNT] = WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), }; -static int renoir_get_metrics_table(struct smu_context *smu, - SmuMetrics_t *metrics_table) -{ - struct smu_table_context *smu_table= &smu->smu_table; - int ret = 0; - - mutex_lock(&smu->metrics_lock); - if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) { - ret = smu_cmn_update_table(smu, SMU_TABLE_SMU_METRICS, 0, - (void *)smu_table->metrics_table, false); - if (ret) { - dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n"); - mutex_unlock(&smu->metrics_lock); - return ret; - } - smu_table->metrics_time = jiffies; - } - - memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); - mutex_unlock(&smu->metrics_lock); - - return ret; -} - static int renoir_init_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; @@ -166,18 +142,32 @@ static int renoir_init_smc_tables(struct smu_context *smu) smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); if (!smu_table->clocks_table) - return -ENOMEM; + goto err0_out; smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); if (!smu_table->metrics_table) - return -ENOMEM; + goto err1_out; smu_table->metrics_time = 0; smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); if (!smu_table->watermarks_table) - return -ENOMEM; + goto err2_out; + + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_0); + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); + if (!smu_table->gpu_metrics_table) + goto err3_out; return 0; + +err3_out: + kfree(smu_table->watermarks_table); +err2_out: + kfree(smu_table->metrics_table); +err1_out: + kfree(smu_table->clocks_table); +err0_out: + return -ENOMEM; } /** @@ -361,7 +351,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, memset(&metrics, 0, sizeof(metrics)); - ret = renoir_get_metrics_table(smu, &metrics); + ret = smu_cmn_get_metrics_table(smu, &metrics, false); if (ret) return ret; @@ -507,7 +497,7 @@ static int renoir_get_current_clk_freq_by_table(struct smu_context *smu, int ret = 0, clk_id = 0; SmuMetrics_t metrics; - ret = renoir_get_metrics_table(smu, &metrics); + ret = smu_cmn_get_metrics_table(smu, &metrics, false); if (ret) return ret; @@ -590,7 +580,7 @@ static int renoir_get_gpu_temperature(struct smu_context *smu, uint32_t *value) if (!value) return -EINVAL; - ret = renoir_get_metrics_table(smu, &metrics); + ret = smu_cmn_get_metrics_table(smu, &metrics, false); if (ret) return ret; @@ -610,7 +600,7 @@ static int renoir_get_current_activity_percent(struct smu_context *smu, if (!value) return -EINVAL; - ret = renoir_get_metrics_table(smu, &metrics); + ret = smu_cmn_get_metrics_table(smu, &metrics, false); if (ret) return 
ret; @@ -626,6 +616,44 @@ static int renoir_get_current_activity_percent(struct smu_context *smu, return 0; } +static int renoir_get_vddc(struct smu_context *smu, uint32_t *value, + unsigned int index) +{ + int ret = 0; + SmuMetrics_t metrics; + + if (index >= 2) + return -EINVAL; + + if (!value) + return -EINVAL; + + ret = smu_cmn_get_metrics_table(smu, &metrics, false); + if (ret) + return ret; + + *value = metrics.Voltage[index]; + + return 0; +} + +static int renoir_get_power(struct smu_context *smu, uint32_t *value) +{ + int ret = 0; + SmuMetrics_t metrics; + + if (!value) + return -EINVAL; + + ret = smu_cmn_get_metrics_table(smu, &metrics, false); + if (ret) + return ret; + + *value = metrics.CurrentSocketPower << 8; + + return 0; +} + /** * This interface get dpm clock table for dc */ @@ -962,6 +990,18 @@ static int renoir_read_sensor(struct smu_context *smu, *(uint32_t *)data *= 100; *size = 4; break; + case AMDGPU_PP_SENSOR_VDDGFX: + ret = renoir_get_vddc(smu, (uint32_t *)data, 0); + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDNB: + ret = renoir_get_vddc(smu, (uint32_t *)data, 1); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_POWER: + ret = renoir_get_power(smu, (uint32_t *)data); + *size = 4; + break; default: ret = -EOPNOTSUPP; break; @@ -987,6 +1027,65 @@ static bool renoir_is_dpm_running(struct smu_context *smu) } +static ssize_t renoir_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v2_0 *gpu_metrics = + (struct gpu_metrics_v2_0 *)smu_table->gpu_metrics_table; + SmuMetrics_t metrics; + int ret = 0; + + ret = smu_cmn_get_metrics_table(smu, &metrics, true); + if (ret) + return ret; + + smu_v12_0_init_gpu_metrics_v2_0(gpu_metrics); + + gpu_metrics->temperature_gfx = metrics.GfxTemperature; + gpu_metrics->temperature_soc = metrics.SocTemperature; + memcpy(&gpu_metrics->temperature_core[0], + &metrics.CoreTemperature[0], + sizeof(uint16_t) * 8); + gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; + gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1]; + + gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; + gpu_metrics->average_mm_activity = metrics.AverageUvdActivity; + + gpu_metrics->average_socket_power = metrics.CurrentSocketPower; + gpu_metrics->average_cpu_power = metrics.Power[0]; + gpu_metrics->average_soc_power = metrics.Power[1]; + memcpy(&gpu_metrics->average_core_power[0], + &metrics.CorePower[0], + sizeof(uint16_t) * 8); + + gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency; + gpu_metrics->average_fclk_frequency = metrics.AverageFclkFrequency; + gpu_metrics->average_vclk_frequency = metrics.AverageVclkFrequency; + + gpu_metrics->current_gfxclk = metrics.ClockFrequency[CLOCK_GFXCLK]; + gpu_metrics->current_socclk = metrics.ClockFrequency[CLOCK_SOCCLK]; + gpu_metrics->current_uclk = metrics.ClockFrequency[CLOCK_UMCCLK]; + gpu_metrics->current_fclk = metrics.ClockFrequency[CLOCK_FCLK]; + gpu_metrics->current_vclk = metrics.ClockFrequency[CLOCK_VCLK]; + gpu_metrics->current_dclk = metrics.ClockFrequency[CLOCK_DCLK]; + memcpy(&gpu_metrics->current_coreclk[0], + &metrics.CoreFrequency[0], + sizeof(uint16_t) * 8); + gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; + gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1]; + + gpu_metrics->throttle_status = metrics.ThrottlerStatus; + + gpu_metrics->fan_pwm = metrics.FanPwm; + + *table = (void 
*)gpu_metrics; + + return sizeof(struct gpu_metrics_v2_0); +} + static const struct pptable_funcs renoir_ppt_funcs = { .set_power_state = NULL, .print_clk_levels = renoir_print_clk_levels, @@ -1021,6 +1120,7 @@ static const struct pptable_funcs renoir_ppt_funcs = { .is_dpm_running = renoir_is_dpm_running, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, + .get_gpu_metrics = renoir_get_gpu_metrics, }; void renoir_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.h similarity index 100% rename from drivers/gpu/drm/amd/powerplay/renoir_ppt.h rename to drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.h diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c similarity index 95% rename from drivers/gpu/drm/amd/powerplay/smu_v12_0.c rename to drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index 31456437bb18e9866f7b61835ba5a88da3e84d04..660f403d5770ce9935b41d6b18232afad64d63fc 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -274,3 +274,15 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu) return ret; } + +void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics) +{ + memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0)); + + gpu_metrics->common_header.structure_size = + sizeof(struct gpu_metrics_v2_0); + gpu_metrics->common_header.format_revision = 2; + gpu_metrics->common_header.content_revision = 0; + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); +} diff --git a/drivers/gpu/drm/amd/powerplay/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c similarity index 93% rename from drivers/gpu/drm/amd/powerplay/smu_cmn.c rename to drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 5c23c44c33bdd357200e9790678ab0165baea951..a58ea08cd115362c37234eefc00b9e6a9dbaf044 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -604,7 +604,7 @@ int smu_cmn_update_table(struct smu_context *smu, memcpy(table_data, table->cpu_addr, table_size); } - return ret; + return 0; } int smu_cmn_write_watermarks_table(struct smu_context *smu) @@ -631,3 +631,48 @@ int smu_cmn_write_pptable(struct smu_context *smu) pptable, true); } + +int smu_cmn_get_metrics_table_locked(struct smu_context *smu, + void *metrics_table, + bool bypass_cache) +{ + struct smu_table_context *smu_table= &smu->smu_table; + uint32_t table_size = + smu_table->tables[SMU_TABLE_SMU_METRICS].size; + int ret = 0; + + if (bypass_cache || + !smu_table->metrics_time || + time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) { + ret = smu_cmn_update_table(smu, + SMU_TABLE_SMU_METRICS, + 0, + smu_table->metrics_table, + false); + if (ret) { + dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n"); + return ret; + } + smu_table->metrics_time = jiffies; + } + + if (metrics_table) + memcpy(metrics_table, smu_table->metrics_table, table_size); + + return 0; +} + +int smu_cmn_get_metrics_table(struct smu_context *smu, + void *metrics_table, + bool bypass_cache) +{ + int ret = 0; + + mutex_lock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table_locked(smu, + metrics_table, + bypass_cache); + mutex_unlock(&smu->metrics_lock); + + return ret; +} diff --git a/drivers/gpu/drm/amd/powerplay/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h similarity index 92% rename from 
drivers/gpu/drm/amd/powerplay/smu_cmn.h rename to drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 98face8c5fd61b9d0149c0fb94ed76196bdd4bec..6d00ad740c27883cdb28aba058ccb3d21badff58 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -79,5 +79,13 @@ int smu_cmn_write_watermarks_table(struct smu_context *smu); int smu_cmn_write_pptable(struct smu_context *smu); +int smu_cmn_get_metrics_table_locked(struct smu_context *smu, + void *metrics_table, + bool bypass_cache); + +int smu_cmn_get_metrics_table(struct smu_context *smu, + void *metrics_table, + bool bypass_cache); + #endif #endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h similarity index 96% rename from drivers/gpu/drm/amd/powerplay/smu_internal.h rename to drivers/gpu/drm/amd/pm/swsmu/smu_internal.h index 264073d4e2635d2241176f46cbb8c4c0ec18280c..38c10177ed2125c4751b3ce089b0d7e94cccc629 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h @@ -92,6 +92,9 @@ #define smu_get_asic_power_limits(smu) smu_ppt_funcs(get_power_limit, 0, smu) #define smu_get_pp_feature_mask(smu, buf) smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf) #define smu_set_pp_feature_mask(smu, new_mask) smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask) +#define smu_gfx_ulv_control(smu, enablement) smu_ppt_funcs(gfx_ulv_control, 0, smu, enablement) +#define smu_deep_sleep_control(smu, enablement) smu_ppt_funcs(deep_sleep_control, 0, smu, enablement) +#define smu_get_fan_parameters(smu) smu_ppt_funcs(get_fan_parameters, 0, smu) #endif #endif diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index ca570b1354787a94e46e501967aeacc077b4cc2a..e9de542f9b7c5ef4837be8442b32fe4a8fde1604 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -532,7 +532,7 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev, malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW); malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH); malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE); - /* fall through */ + fallthrough; case 1: malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW); malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH); @@ -869,7 +869,7 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev, malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW); malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH); malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE); - /* fall through */ + fallthrough; case 1: malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW); malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH); @@ -1324,7 +1324,7 @@ static irqreturn_t malidp_se_irq(int irq, void *arg) break; case MW_RESTART: drm_writeback_signal_completion(&malidp->mw_connector, 0); - /* fall through - to a new start */ + fallthrough; /* to a new start */ case MW_START: /* writeback started, need to emulate one-shot mode */ hw->disable_memwrite(hwdev); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index d62749a10cdd80cd7ceb244b9ee13450ca77a6f7..77066bca87939f180d80f4ae25571e1e269bbd88 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -240,7 +240,7 @@ static int ast_detect_chip(struct 
drm_device *dev, bool *need_post) ast->dp501_fw_addr = NULL; } } - /* fallthrough */ + fallthrough; case 0x0c: ast->tx_chip_type = AST_TX_DP501; } diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index ce94f797d090f3cef01db72ef64665db54e8213e..66b67402f1acd57dcf367b8e742b8c5ea45aae23 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -409,7 +409,6 @@ static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status) switch (data_type) { case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: - fallthrough; case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: if (xfer->msg->rx_len > 1) { /* read second byte */ @@ -418,7 +417,6 @@ static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status) } fallthrough; case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: - fallthrough; case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: if (xfer->msg->rx_len > 0) { /* read first byte */ diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c index d7e65c8694153474f0e3818bed7761ac967a3e50..9fef6413741dcbd674d78cae5d5478aa0bb63ce0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c @@ -61,10 +61,10 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data, switch (hparms->channels) { case 7 ... 8: conf0 |= HDMI_AUD_CONF0_I2S_EN3; - /* Fall-thru */ + fallthrough; case 5 ... 6: conf0 |= HDMI_AUD_CONF0_I2S_EN2; - /* Fall-thru */ + fallthrough; case 3 ... 4: conf0 |= HDMI_AUD_CONF0_I2S_EN1; /* Fall-thru */ diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 454544e0da7de335a8fde074ff4e8ada85dddf59..ecdf9b01340f5e6413f10c26054a722560ea72d2 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -601,13 +601,13 @@ static void ti_sn_bridge_read_valid_rates(struct ti_sn_bridge *pdata, DRM_DEV_ERROR(pdata->dev, "Unexpected max rate (%#x); assuming 5.4 GHz\n", (int)dpcd_val); - /* fall through */ + fallthrough; case DP_LINK_BW_5_4: rate_valid[7] = 1; - /* fall through */ + fallthrough; case DP_LINK_BW_2_7: rate_valid[4] = 1; - /* fall through */ + fallthrough; case DP_LINK_BW_1_62: rate_valid[1] = 1; break; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index f68c69a45752a232ecc79f763617cdca6fd25608..9e1ad493e68919aed1d6d9154f52e77e82f06e59 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -3106,7 +3107,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev) if (ret) DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); } EXPORT_SYMBOL(drm_atomic_helper_shutdown); @@ -3246,7 +3247,7 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev) } unlock: - DRM_MODESET_LOCK_ALL_END(ctx, err); + DRM_MODESET_LOCK_ALL_END(dev, ctx, err); if (err) return ERR_PTR(err); @@ -3327,7 +3328,7 @@ int drm_atomic_helper_resume(struct drm_device *dev, err = drm_atomic_helper_commit_duplicated_state(state, &ctx); - DRM_MODESET_LOCK_ALL_END(ctx, err); + DRM_MODESET_LOCK_ALL_END(dev, ctx, err); drm_atomic_state_put(state); return err; diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 
a0735fbc144b2aa26386230a0a74ba800afb7e4d..7a01d0918861cc83de536907e70b7b8e7c798553 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -537,7 +537,7 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) switch (map->type) { case _DRM_REGISTERS: iounmap(map->handle); - /* FALLTHROUGH */ + fallthrough; case _DRM_FRAME_BUFFER: arch_phys_wc_del(map->mtrr); break; diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index c93123ff7c2186beead154597b21ba58f81c93c9..138ff34b31db53aaf4cfc90f0fc198a7ce51de64 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -294,7 +294,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, crtc->gamma_size, &ctx); out: - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); return ret; } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 283bcc4362ca5870acfd8563adb225a708ea0821..aecdd7ea26dc844c427091edc5a72aa81c1f3308 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -588,7 +588,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id)) return -EACCES; - mutex_lock(&crtc->dev->mode_config.mutex); DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret); @@ -756,8 +755,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, fb = NULL; mode = NULL; - DRM_MODESET_LOCK_ALL_END(ctx, ret); - mutex_unlock(&crtc->dev->mode_config.mutex); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); return ret; } diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 4c21cf69dad5ac5e07b81be34b21b46608386220..1e7c638873c826716020308957c4b9c13a7d86c5 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -423,6 +423,133 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, } EXPORT_SYMBOL(drm_dp_send_real_edid_checksum); +static u8 drm_dp_downstream_port_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + u8 port_count = dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_PORT_COUNT_MASK; + + if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE && port_count > 4) + port_count = 4; + + return port_count; +} + +static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux, + u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + u8 dpcd_ext[6]; + int ret; + + /* + * Prior to DP1.3 the bit represented by + * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved. + * If it is set, DP_DPCD_REV at 0000h could be at a value less than + * the true capability of the panel. The only way to check is to + * then compare 0000h and 2200h. 
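+ * For example (illustrative values, not taken from a specific panel): + * a sink could report 0x12 (DPCD 1.2) at 0000h while 2200h reports + * 0x14, in which case the extended caps at 2200h describe the sink + * and are the ones copied over the base caps below.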
+ */ + if (!(dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) + return 0; + + ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, &dpcd_ext, + sizeof(dpcd_ext)); + if (ret < 0) + return ret; + if (ret != sizeof(dpcd_ext)) + return -EIO; + + if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { + DRM_DEBUG_KMS("%s: Extended DPCD rev less than base DPCD rev (%d > %d)\n", + aux->name, dpcd[DP_DPCD_REV], + dpcd_ext[DP_DPCD_REV]); + return 0; + } + + if (!memcmp(dpcd, dpcd_ext, sizeof(dpcd_ext))) + return 0; + + DRM_DEBUG_KMS("%s: Base DPCD: %*ph\n", + aux->name, DP_RECEIVER_CAP_SIZE, dpcd); + + memcpy(dpcd, dpcd_ext, sizeof(dpcd_ext)); + + return 0; +} + +/** + * drm_dp_read_dpcd_caps() - read DPCD caps and extended DPCD caps if + * available + * @aux: DisplayPort AUX channel + * @dpcd: Buffer to store the resulting DPCD in + * + * Attempts to read the base DPCD caps for @aux. Additionally, this function + * checks for and reads the extended DPRX caps (%DP_DP13_DPCD_REV) if + * present. + * + * Returns: %0 if the DPCD was read successfully, negative error code + * otherwise. + */ +int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, + u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + int ret; + + ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE); + if (ret < 0) + return ret; + if (ret != DP_RECEIVER_CAP_SIZE || dpcd[DP_DPCD_REV] == 0) + return -EIO; + + ret = drm_dp_read_extended_dpcd_caps(aux, dpcd); + if (ret < 0) + return ret; + + DRM_DEBUG_KMS("%s: DPCD: %*ph\n", + aux->name, DP_RECEIVER_CAP_SIZE, dpcd); + + return ret; +} +EXPORT_SYMBOL(drm_dp_read_dpcd_caps); + +/** + * drm_dp_read_downstream_info() - read DPCD downstream port info if available + * @aux: DisplayPort AUX channel + * @dpcd: A cached copy of the port's DPCD + * @downstream_ports: buffer to store the downstream port info in + * + * See also: + * drm_dp_downstream_max_clock() + * drm_dp_downstream_max_bpc() + * + * Returns: 0 if either the downstream port info was read successfully or + * there was no downstream info to read, or a negative error code otherwise. + */ +int drm_dp_read_downstream_info(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]) +{ + int ret; + u8 len; + + memset(downstream_ports, 0, DP_MAX_DOWNSTREAM_PORTS); + + /* No downstream info to read */ + if (!drm_dp_is_branch(dpcd) || + dpcd[DP_DPCD_REV] < DP_DPCD_REV_10 || + !(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) + return 0; + + len = drm_dp_downstream_port_count(dpcd); + if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) + len *= 4; + + ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len); + if (ret < 0) + return ret; + + return ret == len ? 
0 : -EIO; +} +EXPORT_SYMBOL(drm_dp_read_downstream_info); + /** * drm_dp_downstream_max_clock() - extract branch device max * pixel rate for legacy VGA @@ -431,7 +558,11 @@ EXPORT_SYMBOL(drm_dp_send_real_edid_checksum); * @dpcd: DisplayPort configuration data * @port_cap: port capabilities * - * Returns max clock in kHz on success or 0 if max clock not defined + * See also: + * drm_dp_read_downstream_info() + * drm_dp_downstream_max_bpc() + * + * Returns: Max clock in kHz on success or 0 if max clock not defined */ int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]) @@ -462,7 +593,11 @@ EXPORT_SYMBOL(drm_dp_downstream_max_clock); * @dpcd: DisplayPort configuration data * @port_cap: port capabilities * - * Returns max bpc on success or 0 if max bpc not defined + * See also: + * drm_dp_read_downstream_info() + * drm_dp_downstream_max_clock() + * + * Returns: Max bpc on success or 0 if max bpc not defined */ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]) @@ -492,7 +627,7 @@ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], case DP_DS_16BPC: return 16; } - /* fall through */ + fallthrough; default: return 0; } @@ -668,6 +803,54 @@ void drm_dp_set_subconnector_property(struct drm_connector *connector, } EXPORT_SYMBOL(drm_dp_set_subconnector_property); +/** + * drm_dp_read_sink_count_cap() - Check whether a given connector has a valid sink + * count + * @connector: The DRM connector to check + * @dpcd: A cached copy of the connector's DPCD RX capabilities + * @desc: A cached copy of the connector's DP descriptor + * + * See also: drm_dp_read_sink_count() + * + * Returns: %True if the (e)DP connector has a valid sink count that should + * be probed, %false otherwise. + */ +bool drm_dp_read_sink_count_cap(struct drm_connector *connector, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const struct drm_dp_desc *desc) +{ + /* Some eDP panels don't set a valid value for the sink count */ + return connector->connector_type != DRM_MODE_CONNECTOR_eDP && + dpcd[DP_DPCD_REV] >= DP_DPCD_REV_11 && + dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT && + !drm_dp_has_quirk(desc, 0, DP_DPCD_QUIRK_NO_SINK_COUNT); +} +EXPORT_SYMBOL(drm_dp_read_sink_count_cap); + +/** + * drm_dp_read_sink_count() - Retrieve the sink count for a given sink + * @aux: The DP AUX channel to use + * + * See also: drm_dp_read_sink_count_cap() + * + * Returns: The current sink count reported by @aux, or a negative error code + * otherwise. 
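+ * Note: the raw SINK_COUNT register (DPCD 0x200) is not one contiguous + * field; DP_GET_SINK_COUNT() folds bit 7 back in as the high bit above + * bits 5:0, which is why this helper is preferred over reading the + * register directly.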
+ */ +int drm_dp_read_sink_count(struct drm_dp_aux *aux) +{ + u8 count; + int ret; + + ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count); + if (ret < 0) + return ret; + if (ret != 1) + return -EIO; + + return DP_GET_SINK_COUNT(count); +} +EXPORT_SYMBOL(drm_dp_read_sink_count); + /* * I2C-over-AUX implementation */ diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 166405d4c536ea24c779def5500359e217f8bf7b..b9c5a98c5c9ce38e43d098067df50c379e705ab2 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3488,6 +3488,28 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count) return dp_link_bw * dp_link_count / 2; } +/** + * drm_dp_read_mst_cap() - check whether or not a sink supports MST + * @aux: The DP AUX channel to use + * @dpcd: A cached copy of the DPCD capabilities for this sink + * + * Returns: %True if the sink supports MST, %false otherwise + */ +bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + u8 mstm_cap; + + if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12) + return false; + + if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1) + return false; + + return mstm_cap & DP_MST_CAP; +} +EXPORT_SYMBOL(drm_dp_read_mst_cap); + /** * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager * @mgr: manager to set state for @@ -5042,8 +5064,8 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm crtc = conn_state->crtc; - if (WARN_ON(!crtc)) - return -EINVAL; + if (!crtc) + continue; if (!drm_dp_mst_dsc_aux_for_port(pos->port)) continue; diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index 901b078abf40c5d7bf1819e6f6cd4ac256c4f774..db05f386a709e8d9cd5edc305455fb31e6e25128 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -428,7 +428,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, out_unref: drm_mode_object_put(obj); out: - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); return ret; } @@ -470,7 +470,7 @@ static int set_property_legacy(struct drm_mode_object *obj, break; } drm_property_change_valid_put(prop, ref); - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); return ret; } diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 14b6f7638728c3ebd1c19ae628dfd34acc553386..501b4fe55a3dbc038652d49c3115baed38da459c 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1930,7 +1930,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, default: WARN(1, "Invalid aspect ratio (0%x) on mode\n", in->picture_aspect_ratio); - /* fall through */ + fallthrough; case HDMI_PICTURE_ASPECT_NONE: out->flags |= DRM_MODE_FLAG_PIC_AR_NONE; break; diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index b7b90b3a2e389a3f623c9e14c5edd126e6712a16..affe1cfed0098024a3b0b8674495deae4de597b3 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -792,7 +792,7 @@ static int setplane_internal(struct drm_plane *plane, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h, &ctx); - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(plane->dev, ctx, ret); return ret; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index d5a4cd85a0f6dcb82fcd4a4d6bf304761c571e01..c6404b8d067f10279221dad7eb925a1650ae7534 100644 
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -337,9 +337,16 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL); gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV); - gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID); gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID); - gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID); + + /* + * Reading these two registers on GC600 rev 0x19 results in an + * unhandled fault: external abort on non-linefetch + */ + if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) { + gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID); + gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID); + } /* * !!!! HACK ALERT !!!! diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 4e3e95dce6d876151863ffb16cf0a52b5f7110e2..cd46c882269cc959070822de22ea9a61d09cf544 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -89,12 +89,15 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) u32 dma_addr; int change; + /* block scheduler */ + drm_sched_stop(&gpu->sched, sched_job); + /* * If the GPU managed to complete this job's fence, the timeout is * spurious. Bail out. */ if (dma_fence_is_signaled(submit->out_fence)) - return; + goto out_no_timeout; /* * If the GPU is still making forward progress on the front-end (which @@ -105,12 +108,9 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) change = dma_addr - gpu->hangcheck_dma_addr; if (change < 0 || change > 16) { gpu->hangcheck_dma_addr = dma_addr; - return; + goto out_no_timeout; } - /* block scheduler */ - drm_sched_stop(&gpu->sched, sched_job); - if(sched_job) drm_sched_increase_karma(sched_job); @@ -120,6 +120,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) drm_sched_resubmit_jobs(&gpu->sched); +out_no_timeout: /* restart scheduler after GPU is usable again */ drm_sched_start(&gpu->sched, true); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 843dfcefc46a090e9ae016ad00cac968f5454c41..1a1a2853a8429d72e100c0fc09d3b8a46ae2111f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -987,10 +987,10 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, switch (length) { case 3: reg |= payload[2] << 16; - /* Fall through */ + fallthrough; case 2: reg |= payload[1] << 8; - /* Fall through */ + fallthrough; case 1: reg |= payload[0]; exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg); @@ -1038,7 +1038,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi, payload[1] = reg >> 16; ++xfer->rx_done; } - /* Fall through */ + fallthrough; case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: payload[0] = reg >> 8; @@ -1082,10 +1082,10 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi, switch (length) { case 3: payload[2] = (reg >> 16) & 0xff; - /* Fall through */ + fallthrough; case 2: payload[1] = (reg >> 8) & 0xff; - /* Fall through */ + fallthrough; case 1: payload[0] = reg & 0xff; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 56a2b47e1af79fd254e992abdd4d256fbd0dbe3e..5147f5929be720d0f3bfb6d9337f5584cf7a749f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ 
b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -92,7 +92,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, offset = fbi->var.xoffset * fb->format->cpp[0]; offset += fbi->var.yoffset * fb->pitches[0]; - fbi->screen_base = exynos_gem->kvaddr + offset; + fbi->screen_buffer = exynos_gem->kvaddr + offset; fbi->screen_size = size; fbi->fix.smem_len = size; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 7445748288da93723010b3f3466df539bf6d402d..74e926abeff0c1fd95f1830757a406e772bee578 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -40,7 +40,7 @@ struct exynos_drm_gem { unsigned int flags; unsigned long size; void *cookie; - void __iomem *kvaddr; + void *kvaddr; dma_addr_t dma_addr; unsigned long dma_attrs; struct sg_table *sgt; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 86fac677fe69b6594507c212e2485fd128e22740..3c6d9f3913d562c3270c226043ef0bd0998f1245 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -101,19 +101,19 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, break; case DRM_FORMAT_ARGB8888: alpha = DCU_LAYER_AB_WHOLE_FRAME; - /* fall-through */ + fallthrough; case DRM_FORMAT_XRGB8888: bpp = FSL_DCU_ARGB8888; break; case DRM_FORMAT_ARGB4444: alpha = DCU_LAYER_AB_WHOLE_FRAME; - /* fall-through */ + fallthrough; case DRM_FORMAT_XRGB4444: bpp = FSL_DCU_ARGB4444; break; case DRM_FORMAT_ARGB1555: alpha = DCU_LAYER_AB_WHOLE_FRAME; - /* fall-through */ + fallthrough; case DRM_FORMAT_XRGB1555: bpp = FSL_DCU_ARGB1555; break; diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 8c55f5bee9ab6a71f8d8b376d5c5cdb88efe34d9..f4053dd6bde9028e7d4fd4297e913b167e5b626c 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -712,7 +712,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, switch (intel_dsi->pixel_format) { default: MISSING_CASE(intel_dsi->pixel_format); - /* fallthrough */ + fallthrough; case MIPI_DSI_FMT_RGB565: tmp |= PIX_FMT_RGB565; break; @@ -739,7 +739,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, switch (intel_dsi->video_mode_format) { default: MISSING_CASE(intel_dsi->video_mode_format); - /* fallthrough */ + fallthrough; case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS: tmp |= VIDEO_MODE_SYNC_EVENT; break; @@ -792,7 +792,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, switch (pipe) { default: MISSING_CASE(pipe); - /* fallthrough */ + fallthrough; case PIPE_A: tmp |= TRANS_DDI_EDP_INPUT_A_ON; break; diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index c53c85d38fa5277e46e501ea3958bd17272bae8f..a0a41ec5c341908a9c53bb260c1c6cfba127cc4e 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -905,7 +905,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) drm_dbg_kms(&dev_priv->drm, "VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n", psr_table->tp1_wakeup_time); - /* fallthrough */ + fallthrough; case 2: dev_priv->vbt.psr.tp1_wakeup_time_us = 2500; break; @@ -925,7 +925,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) drm_dbg_kms(&dev_priv->drm, "VBT tp2_tp3 wakeup time value %d is outside 
range[0-3], defaulting to max value 2500us\n", psr_table->tp2_tp3_wakeup_time); - /* fallthrough */ + fallthrough; case 2: dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500; break; @@ -1775,7 +1775,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, switch (child->hdmi_max_data_rate) { default: MISSING_CASE(child->hdmi_max_data_rate); - /* fall through */ + fallthrough; case HDMI_MAX_DATA_RATE_PLATFORM: max_tmds_clock = 0; break; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index bb91dace304ad978302e6206387178f151cd5b68..577c4441f32dfb86f136e6c8fabee8156c1eef54 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -326,7 +326,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv, default: drm_err(&dev_priv->drm, "Unknown pnv display core clock 0x%04x\n", gcfgc); - /* fall through */ + fallthrough; case GC_DISPLAY_CLOCK_133_MHZ_PNV: cdclk_config->cdclk = 133333; break; @@ -766,7 +766,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, switch (cdclk) { default: MISSING_CASE(cdclk); - /* fall through */ + fallthrough; case 337500: val |= LCPLL_CLK_FREQ_337_5_BDW; break; @@ -1042,7 +1042,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, drm_WARN_ON(&dev_priv->drm, cdclk != dev_priv->cdclk.hw.bypass); drm_WARN_ON(&dev_priv->drm, vco != 0); - /* fall through */ + fallthrough; case 308571: case 337500: freq_select = CDCLK_FREQ_337_308; @@ -1333,7 +1333,7 @@ static void icl_readout_refclk(struct drm_i915_private *dev_priv, switch (dssm) { default: MISSING_CASE(dssm); - /* fall through */ + fallthrough; case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: cdclk_config->ref = 24000; break; @@ -1561,7 +1561,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, drm_WARN_ON(&dev_priv->drm, cdclk != dev_priv->cdclk.hw.bypass); drm_WARN_ON(&dev_priv->drm, vco != 0); - /* fall through */ + fallthrough; case 2: divider = BXT_CDCLK_CD2X_DIV_SEL_1; break; @@ -2677,7 +2677,7 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv) */ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) intel_de_write(dev_priv, GMBUSFREQ_VLV, - DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000)); + DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000)); } static int cnp_rawclk(struct drm_i915_private *dev_priv) @@ -2903,9 +2903,10 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) dev_priv->display.get_cdclk = i85x_get_cdclk; else if (IS_I845G(dev_priv)) dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk; - else { /* 830 */ - drm_WARN(&dev_priv->drm, !IS_I830(dev_priv), - "Unknown platform. Assuming 133 MHz CDCLK\n"); + else if (IS_I830(dev_priv)) + dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk; + + if (drm_WARN(&dev_priv->drm, !dev_priv->display.get_cdclk, + "Unknown platform. 
Assuming 133 MHz CDCLK\n")) dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk; - } } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index 5731806e4cee964c2b6072027a6f777dcb4946e8..6b31fde4be16413fee8e31a0c3bdd901ca44afb0 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -17,8 +17,8 @@ struct intel_atomic_state; struct intel_crtc_state; struct intel_cdclk_vals { - u16 refclk; u32 cdclk; + u16 refclk; u8 divider; /* CD2X divider * 2 */ u8 ratio; }; diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index eccaa79cb4a9d97f36ebcdab1226616153e24a17..157d8c8c605a1c0d3caca74e7026906a81459bdc 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -52,7 +52,7 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy) switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { default: MISSING_CASE(val); - /* fall through */ + fallthrough; case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0: procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0]; break; @@ -258,7 +258,7 @@ static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy) static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, enum phy phy) { - bool ret; + bool ret = true; u32 expected_val = 0; if (!icl_combo_phy_enabled(dev_priv, phy)) @@ -276,7 +276,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, DCC_MODE_SELECT_CONTINUOSLY); } - ret = cnl_verify_procmon_ref_values(dev_priv, phy); + ret &= cnl_verify_procmon_ref_values(dev_priv, phy); if (phy_is_master(dev_priv, phy)) { ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy), @@ -320,7 +320,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, break; default: MISSING_CASE(lane_count); - /* fall-through */ + fallthrough; case 4: lane_mask = PWR_UP_ALL_LANES; break; @@ -337,7 +337,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, break; default: MISSING_CASE(lane_count); - /* fall-through */ + fallthrough; case 4: lane_mask = PWR_UP_ALL_LANES; break; diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c index f22a7645c249d710ac04ea0d0ecb25b6936a4bf4..d5db16764619c8621c5fecfe84fbc0b40b75d0ba 100644 --- a/drivers/gpu/drm/i915/display/intel_csr.c +++ b/drivers/gpu/drm/i915/display/intel_csr.c @@ -40,12 +40,12 @@ #define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE -#define RKL_CSR_PATH "i915/rkl_dmc_ver2_01.bin" -#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 1) +#define RKL_CSR_PATH "i915/rkl_dmc_ver2_02.bin" +#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 2) MODULE_FIRMWARE(RKL_CSR_PATH); -#define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin" -#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6) +#define TGL_CSR_PATH "i915/tgl_dmc_ver2_08.bin" +#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 8) #define TGL_CSR_MAX_FW_SIZE 0x6000 MODULE_FIRMWARE(TGL_CSR_PATH); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 2c484b55bcdf7a333fa406be658f6c00221d7e69..19ac6b2a664cc1df99c22c7ea4bf439f51cb8391 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -706,6 +706,42 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ }; +static const 
struct cnl_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */ + { 0xC, 0x60, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */ + { 0xC, 0x7F, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */ + { 0xC, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x6F, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ + { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */ + { 0x6, 0x60, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +/* + * Cloned the HOBL entry to comply with the voltage and pre-emphasis entries + * that DisplayPort specification requires + */ +static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = { + /* VS pre-emp */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 0 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 1 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 2 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 3 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 0 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 1 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 2 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 0 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */ +}; + +static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table) +{ + return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl; +} + static const struct ddi_buf_trans * bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) { @@ -1050,9 +1086,26 @@ static const struct cnl_ddi_buf_trans * tgl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.hobl) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (!intel_dp->hobl_failed && rate <= 540000) { + /* Same table applies to TGL, RKL and DG1 */ + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl); + return tgl_combo_phy_ddi_translations_edp_hbr2_hobl; + } + } + if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) { return icl_get_combo_buf_trans(encoder, type, rate, n_entries); } else if (rate > 270000) { + if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { + *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2); + return tgl_uy_combo_phy_ddi_translations_dp_hbr2; + } + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2); return tgl_combo_phy_ddi_translations_dp_hbr2; } @@ -1888,7 +1941,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { default: MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK); - /* fallthrough */ + fallthrough; case TRANS_DDI_EDP_INPUT_A_ON: case TRANS_DDI_EDP_INPUT_A_ONOFF: *pipe_mask = BIT(PIPE_A); @@ -2392,6 +2445,15 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, level = n_entries - 1; } + if (type == INTEL_OUTPUT_EDP) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED; + intel_dp->hobl_active = is_hobl_buf_trans(ddi_translations); + intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val, + intel_dp->hobl_active ? 
val : 0); + } + /* Set PORT_TX_DW5 */ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | @@ -2802,7 +2864,9 @@ hsw_set_signal_levels(struct intel_dp *intel_dp) static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, enum phy phy) { - if (intel_phy_is_combo(dev_priv, phy)) { + if (IS_ROCKETLAKE(dev_priv)) { + return RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); + } else if (intel_phy_is_combo(dev_priv, phy)) { return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } else if (intel_phy_is_tc(dev_priv, phy)) { enum tc_port tc_port = intel_port_to_tc(dev_priv, @@ -2829,6 +2893,16 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder, (val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0); if (intel_phy_is_combo(dev_priv, phy)) { + u32 mask, sel; + + if (IS_ROCKETLAKE(dev_priv)) { + mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); + sel = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); + } else { + mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); + sel = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); + } + /* * Even though this register references DDIs, note that we * want to pass the PHY rather than the port (DDI). For @@ -2839,8 +2913,8 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder, * Clock Select chooses the PLL for both DDIA and DDID and * drives port A in all cases." */ - val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); - val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); + val &= ~mask; + val |= sel; intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0); } @@ -4037,8 +4111,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) intel_wait_ddi_buf_idle(dev_priv, port); } - dp_tp_ctl = DP_TP_CTL_ENABLE | - DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; + dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1; if (intel_dp->link_mst) dp_tp_ctl |= DP_TP_CTL_MODE_MST; else { @@ -4061,16 +4134,10 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); - enum port port = dp_to_dig_port(intel_dp)->base.port; u32 temp; temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); - if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) - temp |= DP_TP_CTL_SCRAMBLE_DISABLE; - else - temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; - temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; switch (dp_train_pat & train_pat_mask) { case DP_TRAINING_PATTERN_DISABLE: @@ -4091,9 +4158,6 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp, } intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp); - - intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); - intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); } static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp) @@ -4268,7 +4332,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->hdmi_scrambling = true; if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE) pipe_config->hdmi_high_tmds_clock_ratio = true; - /* fall through */ + fallthrough; case TRANS_DDI_MODE_SELECT_DVI: pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI); pipe_config->lane_count = 4; @@ -4878,6 +4942,13 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port) return max_lanes; } +static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy) +{ + return i915->hti_state & HDPORT_ENABLED && + (i915->hti_state & HDPORT_PHY_USED_DP(phy) || + i915->hti_state & 
HDPORT_PHY_USED_HDMI(phy)); +} + void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) { struct intel_digital_port *dig_port; @@ -4885,6 +4956,18 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) bool init_hdmi, init_dp, init_lspcon = false; enum phy phy = intel_port_to_phy(dev_priv, port); + /* + * On platforms with HTI (aka HDPORT), if it's enabled at boot it may + * have taken over some of the PHYs and made them unavailable to the + * driver. In that case we should skip initializing the corresponding + * outputs. + */ + if (hti_uses_phy(dev_priv, phy)) { + drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n", + port_name(port), phy_name(phy)); + return; + } + init_hdmi = intel_bios_port_supports_dvi(dev_priv, port) || intel_bios_port_supports_hdmi(dev_priv, port); init_dp = intel_bios_port_supports_dp(dev_priv, port); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 729ec6e0d43a064f2af57e53bf0a3763a25b6521..ddb588f863f8cc71a729741247929bfc54f0229d 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -47,6 +47,7 @@ #include "display/intel_ddi.h" #include "display/intel_dp.h" #include "display/intel_dp_mst.h" +#include "display/intel_dpll_mgr.h" #include "display/intel_dsi.h" #include "display/intel_dvo.h" #include "display/intel_gmbus.h" @@ -2029,12 +2030,12 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) case I915_FORMAT_MOD_Y_TILED_CCS: if (is_ccs_plane(fb, color_plane)) return 128; - /* fall through */ + fallthrough; case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: if (is_ccs_plane(fb, color_plane)) return 64; - /* fall through */ + fallthrough; case I915_FORMAT_MOD_Y_TILED: if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv)) return 128; @@ -2043,7 +2044,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) case I915_FORMAT_MOD_Yf_TILED_CCS: if (is_ccs_plane(fb, color_plane)) return 128; - /* fall through */ + fallthrough; case I915_FORMAT_MOD_Yf_TILED: switch (cpp) { case 1: @@ -2185,7 +2186,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: if (is_semiplanar_uv_plane(fb, color_plane)) return intel_tile_row_size(fb, color_plane); - /* Fall-through */ + fallthrough; case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: return 16 * 1024; case I915_FORMAT_MOD_Y_TILED_CCS: @@ -2194,7 +2195,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, if (INTEL_GEN(dev_priv) >= 12 && is_semiplanar_uv_plane(fb, color_plane)) return intel_tile_row_size(fb, color_plane); - /* Fall-through */ + fallthrough; case I915_FORMAT_MOD_Yf_TILED: return 1 * 1024 * 1024; default: @@ -2310,7 +2311,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) { - i915_gem_object_lock(vma->obj); + i915_gem_object_lock(vma->obj, NULL); if (flags & PLANE_HAS_FENCE) i915_vma_unpin_fence(vma); i915_gem_object_unpin_from_display_plane(vma); @@ -3450,7 +3451,7 @@ initial_plane_vma(struct drm_i915_private *i915, if (IS_ERR(vma)) goto err_obj; - if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base)) + if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base)) goto err_obj; if (i915_gem_object_is_tiled(obj) && @@ -3761,6 +3762,44 @@ static int glk_max_plane_width(const struct 
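HTI (also called HDPORT) is boot firmware that may keep ownership of some display PHYs; hti_uses_phy() above decodes a snapshot of the HDPORT_STATE register (read once at driver init in a later intel_modeset_init() hunk) to decide which PHYs are off-limits. The consumer side, condensed from the intel_ddi_init() hunk above:

	if (hti_uses_phy(dev_priv, phy)) {
		/* HTI owns the PHY; registering an output would fight it */
		drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n",
			    port_name(port), phy_name(phy));
		return;
	}

The same register also reports which DPLLs HTI reserved; a later hunk (intel_get_hti_plls()) masks those out of DPLL selection.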
drm_framebuffer *fb, } } +static int icl_min_plane_width(const struct drm_framebuffer *fb) +{ + /* Wa_14011264657, Wa_14011050563: gen11+ */ + switch (fb->format->format) { + case DRM_FORMAT_C8: + return 18; + case DRM_FORMAT_RGB565: + return 10; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + return 6; + case DRM_FORMAT_NV12: + return 20; + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + return 12; + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: + return 4; + default: + return 1; + } +} + static int icl_max_plane_width(const struct drm_framebuffer *fb, int color_plane, unsigned int rotation) @@ -3843,29 +3882,31 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) int y = plane_state->uapi.src.y1 >> 16; int w = drm_rect_width(&plane_state->uapi.src) >> 16; int h = drm_rect_height(&plane_state->uapi.src) >> 16; - int max_width; - int max_height; - u32 alignment; - u32 offset; + int max_width, min_width, max_height; + u32 alignment, offset; int aux_plane = intel_main_to_aux_plane(fb, 0); u32 aux_offset = plane_state->color_plane[aux_plane].offset; - if (INTEL_GEN(dev_priv) >= 11) + if (INTEL_GEN(dev_priv) >= 11) { max_width = icl_max_plane_width(fb, 0, rotation); - else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + min_width = icl_min_plane_width(fb); + } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { max_width = glk_max_plane_width(fb, 0, rotation); - else + min_width = 1; + } else { max_width = skl_max_plane_width(fb, 0, rotation); + min_width = 1; + } if (INTEL_GEN(dev_priv) >= 11) max_height = icl_max_plane_height(); else max_height = skl_max_plane_height(); - if (w > max_width || h > max_height) { + if (w > max_width || w < min_width || h > max_height) { drm_dbg_kms(&dev_priv->drm, - "requested Y/RGB source size %dx%d too big (limit %dx%d)\n", - w, h, max_width, max_height); + "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n", + w, h, min_width, max_width, max_height); return -EINVAL; } @@ -6211,7 +6252,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_ARGB16161616F: if (INTEL_GEN(dev_priv) >= 11) break; - /* fall through */ + fallthrough; default: drm_dbg_kms(&dev_priv->drm, "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", @@ -10802,9 +10843,18 @@ static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, u32 temp; if (intel_phy_is_combo(dev_priv, phy)) { - temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & - ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); - id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); + u32 mask, shift; + + if (IS_ROCKETLAKE(dev_priv)) { + mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); + shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); + } else { + mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); + shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); + } + + temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask; + id = temp >> shift; port_dpll_id = ICL_PORT_DPLL_DEFAULT; } else if (intel_phy_is_tc(dev_priv, phy)) { u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & 
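icl_min_plane_width() above encodes Wa_14011264657/Wa_14011050563: on gen11+ each pixel format carries a minimum source width (e.g. 20 pixels for NV12, 4 for the FP16 formats), and skl_check_main_surface() now rejects planes below that minimum alongside the existing maximum checks. A worked check, assuming an NV12 framebuffer:

	int w = drm_rect_width(&plane_state->uapi.src) >> 16;

	/* NV12 on gen11+: icl_min_plane_width() returns 20 */
	if (w < icl_min_plane_width(fb))
		return -EINVAL;	/* e.g. a 16-pixel-wide NV12 source is rejected */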
DDI_CLK_SEL_MASK; @@ -10896,7 +10946,7 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, break; default: MISSING_CASE(ddi_pll_sel); - /* fall through */ + fallthrough; case PORT_CLK_SEL_NONE: return; } @@ -10956,10 +11006,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, drm_WARN(dev, 1, "unknown pipe linked to transcoder %s\n", transcoder_name(panel_transcoder)); - /* fall through */ + fallthrough; case TRANS_DDI_EDP_INPUT_A_ONOFF: force_thru = true; - /* fall through */ + fallthrough; case TRANS_DDI_EDP_INPUT_A_ON: trans_pipe = PIPE_A; break; @@ -12760,6 +12810,9 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, } + if (!mode_changed) + intel_psr2_sel_fetch_update(state, crtc); + return 0; } @@ -13183,7 +13236,7 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state) case INTEL_OUTPUT_DDI: if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) break; - /* else, fall through */ + fallthrough; case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: @@ -14930,7 +14983,7 @@ static int intel_atomic_check(struct drm_device *dev, if (any_ms && !check_digital_port_conflicts(state)) { drm_dbg_kms(&dev_priv->drm, "rejecting conflicting digital port configuration\n"); - ret = EINVAL; + ret = -EINVAL; goto fail; } @@ -14956,12 +15009,6 @@ static int intel_atomic_check(struct drm_device *dev, if (dev_priv->wm.distrust_bios_wm) any_ms = true; - if (any_ms) { - ret = intel_modeset_checks(state); - if (ret) - goto fail; - } - intel_fbc_choose_crtc(dev_priv, state); ret = calc_watermark_data(state); if (ret) @@ -14976,6 +15023,10 @@ static int intel_atomic_check(struct drm_device *dev, goto fail; if (any_ms) { + ret = intel_modeset_checks(state); + if (ret) + goto fail; + ret = intel_modeset_calc_cdclk(state); if (ret) return ret; @@ -15136,6 +15187,8 @@ static void commit_pipe_config(struct intel_atomic_state *state, if (new_crtc_state->update_pipe) intel_pipe_fastset(old_crtc_state, new_crtc_state); + + intel_psr2_program_trans_man_trk_ctl(new_crtc_state); } if (dev_priv->display.atomic_update_watermarks) @@ -17139,7 +17192,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, if (!intel_fb->frontbuffer) return -ENOMEM; - i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); tiling = i915_gem_object_get_tiling(obj); stride = i915_gem_object_get_stride(obj); i915_gem_object_unlock(obj); @@ -17894,6 +17947,13 @@ int intel_modeset_init(struct drm_i915_private *i915) if (i915->max_cdclk_freq == 0) intel_update_max_cdclk(i915); + /* + * If the platform has HTI, we need to find out whether it has reserved + * any display resources before we create our display outputs. 
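The one-character fix in intel_atomic_check() above ("ret = EINVAL" becoming "ret = -EINVAL") matters because the kernel convention is 0 on success and a negative errno on failure; a positive EINVAL still takes the failure goto, but then escapes the atomic ioctl path as a nonsensical positive return value instead of a proper -EINVAL. Sketch of the corrected convention:

	if (any_ms && !check_digital_port_conflicts(state)) {
		ret = -EINVAL;	/* negative errno, as callers and userspace expect */
		goto fail;
	}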
+ */ + if (INTEL_INFO(i915)->display.has_hti) + i915->hti_state = intel_de_read(i915, HDPORT_STATE); + /* Just disable it once at startup */ intel_vga_disable(i915); intel_setup_outputs(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 3644752cc5ece8bc16aab4dacfebbb3bda992c9a..f549381048b3c612b8370e7edb9718ba85d8ab50 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -417,6 +417,9 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); seq_printf(m, "%d\t%d\n", frame, su_blocks); } + + seq_printf(m, "PSR2 selective fetch: %s\n", + enableddisabled(psr->psr2_sel_fetch_enabled)); } unlock: @@ -2044,9 +2047,12 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); static int i915_lpsp_capability_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; - struct intel_encoder *encoder = - intel_attached_encoder(to_intel_connector(connector)); struct drm_i915_private *i915 = to_i915(connector->dev); + struct intel_encoder *encoder; + + encoder = intel_attached_encoder(to_intel_connector(connector)); + if (!encoder) + return -ENODEV; if (connector->status != connector_status_connected) return -ENODEV; diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 0c713e83274dd061147d20c2e83e25adca10aa79..7946c6af4b1e743efc75dc07cd42a2ab202554f2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -3927,12 +3927,13 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block) int ret; while (1) { - u32 low_val = 0, high_val; + u32 low_val; + u32 high_val = 0; if (block) - high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ; + low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; else - high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ; + low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; /* * Spec states that we should timeout the request after 200us @@ -3951,8 +3952,7 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block) if (++tries == 3) break; - if (ret == -EAGAIN) - msleep(1); + msleep(1); } if (ret) @@ -4146,6 +4146,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, }, }, + { + .name = "TC cold off", + .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, + .ops = &tgl_tc_cold_off_ops, + .id = DISP_PW_ID_NONE, + }, { .name = "AUX A", .domains = TGL_AUX_A_IO_POWER_DOMAINS, @@ -4332,12 +4338,6 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.irq_pipe_mask = BIT(PIPE_D), }, }, - { - .name = "TC cold off", - .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, - .ops = &tgl_tc_cold_off_ops, - .id = DISP_PW_ID_NONE, - }, }; static const struct i915_power_well_desc rkl_power_wells[] = { @@ -5240,10 +5240,10 @@ struct buddy_page_mask { }; static const struct buddy_page_mask tgl_buddy_page_masks[] = { - { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE }, { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF }, { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C }, { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F }, + { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 }, {} }; @@ -5302,6 +5302,12 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, gen9_set_dc_state(dev_priv, 
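The tgl_tc_cold_request() hunk above fixes two things: the block/unblock request bits belong in the low data dword of the pcode mailbox rather than the high one, and the 1 ms sleep between attempts becomes unconditional instead of firing only on -EAGAIN, so genuine timeouts are also retried with a pause. A sketch of the corrected loop; tc_cold_send() is a stand-in for the pcode mailbox call, not a real function:

	u32 low_val, high_val = 0;
	int tries = 0, ret;

	while (1) {
		low_val = block ? TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ :
				  TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;

		ret = tc_cold_send(i915, &low_val, &high_val);
		if (!ret || ++tries == 3)
			break;

		msleep(1);	/* unconditional: retry timeouts, not just -EAGAIN */
	}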
DC_STATE_DISABLE); + /* Wa_14011294188:ehl,jsl,tgl,rkl */ + if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP && + INTEL_PCH_TYPE(dev_priv) < PCH_DG1) + intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0, + PCH_DPMGUNIT_CLOCK_GATE_DISABLE); + /* 1. Enable PCH reset handshake. */ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index e8f809161c75fda754a26cb9f90766322a93b067..9349b15afff679345c3a76a9ffc07340e4ccfb5e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -931,6 +931,7 @@ struct intel_crtc_state { bool has_psr; bool has_psr2; + bool enable_psr2_sel_fetch; u32 dc3co_exitline; /* @@ -1073,6 +1074,8 @@ struct intel_crtc_state { /* For DSB related info */ struct intel_dsb *dsb; + + u32 psr2_man_track_ctl; }; enum intel_pipe_crc_source { @@ -1375,6 +1378,9 @@ struct intel_dp { /* Display stream compression testing */ bool force_dsc_en; + + bool hobl_failed; + bool hobl_active; }; enum lspcon_vendor { diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 79c27f91f42c0a8f32bbbea38e9db77d27bfb895..284b15f845926c6f54d1731ebe7047576b0be464 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4449,62 +4449,6 @@ intel_dp_link_down(struct intel_encoder *encoder, } } -static void -intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 dpcd_ext[6]; - - /* - * Prior to DP1.3 the bit represented by - * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved. - * if it is set DP_DPCD_REV at 0000h could be at a value less than - * the true capability of the panel. The only way to check is to - * then compare 0000h and 2200h. 
- */ - if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] & - DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) - return; - - if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV, - &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) { - drm_err(&i915->drm, - "DPCD failed read at extended capabilities\n"); - return; - } - - if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { - drm_dbg_kms(&i915->drm, - "DPCD extended DPCD rev less than base DPCD rev\n"); - return; - } - - if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext))) - return; - - drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n", - (int)sizeof(intel_dp->dpcd), intel_dp->dpcd); - - memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)); -} - -bool -intel_dp_read_dpcd(struct intel_dp *intel_dp) -{ - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - - if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, - sizeof(intel_dp->dpcd)) < 0) - return false; /* aux transfer failed */ - - intel_dp_extended_receiver_capabilities(intel_dp); - - drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd), - intel_dp->dpcd); - - return intel_dp->dpcd[DP_DPCD_REV] != 0; -} - bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) { u8 dprx = 0; @@ -4563,7 +4507,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) /* this function is meant to be called only once */ drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); - if (!intel_dp_read_dpcd(intel_dp)) + if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) return false; drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, @@ -4634,11 +4578,23 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) return true; } +static bool +intel_dp_has_sink_count(struct intel_dp *intel_dp) +{ + if (!intel_dp->attached_connector) + return false; + + return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base, + intel_dp->dpcd, + &intel_dp->desc); +} static bool intel_dp_get_dpcd(struct intel_dp *intel_dp) { - if (!intel_dp_read_dpcd(intel_dp)) + int ret; + + if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) return false; /* @@ -4653,18 +4609,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) intel_dp_set_common_rates(intel_dp); } - /* - * Some eDP panels do not set a valid value for sink count, that is why - * it don't care about read it here and in intel_edp_init_dpcd(). - */ - if (!intel_dp_is_edp(intel_dp) && - !drm_dp_has_quirk(&intel_dp->desc, 0, - DP_DPCD_QUIRK_NO_SINK_COUNT)) { - u8 count; - ssize_t r; - - r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count); - if (r < 1) + if (intel_dp_has_sink_count(intel_dp)) { + ret = drm_dp_read_sink_count(&intel_dp->aux); + if (ret < 0) return false; /* @@ -4672,7 +4619,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) * a member variable in intel_dp will track any changes * between short pulse interrupts. 
*/ - intel_dp->sink_count = DP_GET_SINK_COUNT(count); + intel_dp->sink_count = ret; /* * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that @@ -4685,32 +4632,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) return false; } - if (!drm_dp_is_branch(intel_dp->dpcd)) - return true; /* native DP sink */ - - if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) - return true; /* no per-port downstream info */ - - if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, - intel_dp->downstream_ports, - DP_MAX_DOWNSTREAM_PORTS) < 0) - return false; /* downstream port status fetch failed */ - - return true; -} - -static bool -intel_dp_sink_can_mst(struct intel_dp *intel_dp) -{ - u8 mstm_cap; - - if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) - return false; - - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1) - return false; - - return mstm_cap & DP_MST_CAP; + return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd, + intel_dp->downstream_ports) == 0; } static bool @@ -4720,7 +4643,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp) return i915->params.enable_dp_mst && intel_dp->can_mst && - intel_dp_sink_can_mst(intel_dp); + drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); } static void @@ -4729,7 +4652,7 @@ intel_dp_configure_mst(struct intel_dp *intel_dp) struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - bool sink_can_mst = intel_dp_sink_can_mst(intel_dp); + bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", @@ -5963,9 +5886,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) return connector_status_connected; /* If we're HPD-aware, SINK_COUNT changes dynamically */ - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && + if (intel_dp_has_sink_count(intel_dp) && intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { - return intel_dp->sink_count ? 
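These intel_dp.c hunks drop i915's private DPCD plumbing (intel_dp_read_dpcd(), the extended-receiver-caps dance, the open-coded sink-count and downstream-port reads, intel_dp_sink_can_mst()) in favor of the shared DRM helpers, which centralize the DP 1.3 extended-caps and sink-count quirks for every driver. A minimal probe built on those helpers; probe_sink() itself is illustrative, not an i915 function:

	static int probe_sink(struct drm_dp_aux *aux,
			      u8 dpcd[DP_RECEIVER_CAP_SIZE],
			      u8 ports[DP_MAX_DOWNSTREAM_PORTS])
	{
		int ret;

		ret = drm_dp_read_dpcd_caps(aux, dpcd);	/* handles 2200h vs 0000h */
		if (ret)
			return ret;

		ret = drm_dp_read_downstream_info(aux, dpcd, ports);
		if (ret)
			return ret;

		return drm_dp_read_sink_count(aux);	/* count, or negative errno */
	}

drm_dp_read_mst_cap(aux, dpcd) replaces the local MSTM_CAP peek in the same way.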
connector_status_connected : connector_status_disconnected; } diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index b901ab850cbd92cc0889bf8e89cacf4bf084aa88..0a3af3410d52e1fee205af8a2004e5d19507d446 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -99,7 +99,6 @@ bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); bool intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status); -bool intel_dp_read_dpcd(struct intel_dp *intel_dp); bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int intel_dp_max_data_rate(int max_link_clock, int max_lanes); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index a23ed7290843a8eaba9b0127ed7c33e8a4723bf6..f2c8b56be9eadf5a7521b6599024c53ea99060cf 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -410,10 +410,17 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) intel_connector->base.base.id, intel_connector->base.name, intel_dp->link_rate, intel_dp->lane_count); - if (!intel_dp_get_link_train_fallback_values(intel_dp, - intel_dp->link_rate, - intel_dp->lane_count)) - /* Schedule a Hotplug Uevent to userspace to start modeset */ - schedule_work(&intel_connector->modeset_retry_work); - return; + + if (intel_dp->hobl_active) { + drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + "Link Training failed with HOBL active, not enabling it from now on"); + intel_dp->hobl_failed = true; + } else if (intel_dp_get_link_train_fallback_values(intel_dp, + intel_dp->link_rate, + intel_dp->lane_count)) { + return; + } + + /* Schedule a Hotplug Uevent to userspace to start modeset */ + schedule_work(&intel_connector->modeset_retry_work); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index aeb6ee395cce5180ea0f3a88e441fa58f408286e..c9013f8f766fae695c9746a9cd91463eaf6e7ea6 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -892,7 +892,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, refclk = dev_priv->dpll.ref_clks.nssc; break; } - /* fall through */ + fallthrough; case WRPLL_REF_PCH_SSC: /* * We could calculate spread here, but our checking @@ -2977,7 +2977,7 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, switch (dev_priv->dpll.ref_clks.nssc) { default: MISSING_CASE(dev_priv->dpll.ref_clks.nssc); - /* fall-through */ + fallthrough; case 19200: *pll_params = tgl_tbt_pll_19_2MHz_values; break; @@ -2992,7 +2992,7 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, switch (dev_priv->dpll.ref_clks.nssc) { default: MISSING_CASE(dev_priv->dpll.ref_clks.nssc); - /* fall-through */ + fallthrough; case 19200: case 38400: *pll_params = icl_tbt_pll_19_2MHz_values; @@ -3120,7 +3120,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, switch (div1) { default: MISSING_CASE(div1); - /* fall through */ + fallthrough; case 2: hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2; break; @@ -3475,6 +3475,14 @@ static void icl_update_active_dpll(struct intel_atomic_state *state, icl_set_active_port_dpll(crtc_state, port_dpll_id); } +static u32 intel_get_hti_plls(struct drm_i915_private *i915) +{ + if (!(i915->hti_state & HDPORT_ENABLED)) + return 0; + + return 
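The link-training failure path gains an escape hatch for HOBL: if training fails while the HOBL vswing table is active, the driver latches hobl_failed and retries at the same link rate and lane count with the normal tables, before it starts degrading link parameters. Condensed from the intel_dp_start_link_train() hunk above:

	if (intel_dp->hobl_active) {
		intel_dp->hobl_failed = true;	/* next attempt uses normal tables */
	} else if (intel_dp_get_link_train_fallback_values(intel_dp,
							   intel_dp->link_rate,
							   intel_dp->lane_count)) {
		return;	/* nonzero: nothing left to fall back to */
	}

	/* either way, ask userspace for another modeset */
	schedule_work(&intel_connector->modeset_retry_work);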
REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state); +} + static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) @@ -3504,13 +3512,22 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); - if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) + if (IS_ROCKETLAKE(dev_priv)) { dpll_mask = BIT(DPLL_ID_EHL_DPLL4) | BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0); - else + } else if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) { + dpll_mask = + BIT(DPLL_ID_EHL_DPLL4) | + BIT(DPLL_ID_ICL_DPLL1) | + BIT(DPLL_ID_ICL_DPLL0); + } else { dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0); + } + + /* Eliminate DPLLs from consideration if reserved by HTI */ + dpll_mask &= ~intel_get_hti_plls(dev_priv); port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, @@ -3791,7 +3808,12 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, if (!(val & PLL_ENABLE)) goto out; - if (INTEL_GEN(dev_priv) >= 12) { + if (IS_ROCKETLAKE(dev_priv)) { + hw_state->cfgcr0 = intel_de_read(dev_priv, + RKL_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = intel_de_read(dev_priv, + RKL_DPLL_CFGCR1(id)); + } else if (INTEL_GEN(dev_priv) >= 12) { hw_state->cfgcr0 = intel_de_read(dev_priv, TGL_DPLL_CFGCR0(id)); hw_state->cfgcr1 = intel_de_read(dev_priv, @@ -3844,7 +3866,10 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, const enum intel_dpll_id id = pll->info->id; i915_reg_t cfgcr0_reg, cfgcr1_reg; - if (INTEL_GEN(dev_priv) >= 12) { + if (IS_ROCKETLAKE(dev_priv)) { + cfgcr0_reg = RKL_DPLL_CFGCR0(id); + cfgcr1_reg = RKL_DPLL_CFGCR1(id); + } else if (INTEL_GEN(dev_priv) >= 12) { cfgcr0_reg = TGL_DPLL_CFGCR0(id); cfgcr1_reg = TGL_DPLL_CFGCR1(id); } else { @@ -4276,6 +4301,21 @@ static const struct intel_dpll_mgr tgl_pll_mgr = { .dump_hw_state = icl_dump_hw_state, }; +static const struct dpll_info rkl_plls[] = { + { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, + { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, + { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 }, + { }, +}; + +static const struct intel_dpll_mgr rkl_pll_mgr = { + .dpll_info = rkl_plls, + .get_dplls = icl_get_dplls, + .put_dplls = icl_put_dplls, + .update_ref_clks = icl_update_dpll_ref_clks, + .dump_hw_state = icl_dump_hw_state, +}; + /** * intel_shared_dpll_init - Initialize shared DPLLs * @dev: drm device @@ -4289,7 +4329,9 @@ void intel_shared_dpll_init(struct drm_device *dev) const struct dpll_info *dpll_info; int i; - if (INTEL_GEN(dev_priv) >= 12) + if (IS_ROCKETLAKE(dev_priv)) + dpll_mgr = &rkl_pll_mgr; + else if (INTEL_GEN(dev_priv) >= 12) dpll_mgr = &tgl_pll_mgr; else if (IS_ELKHARTLAKE(dev_priv)) dpll_mgr = &ehl_pll_mgr; diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 24c3a0f212c66103db3a61cd0f3ac99ddd3d1fe6..135f5e8a4d70d57bee3f39151c5cd9870510ac52 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -424,6 +424,14 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, fbc->no_fbc_reason = reason; } +static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915) +{ + if (INTEL_GEN(i915) >= 5 || IS_G4X(i915)) + return BIT_ULL(28); + else + return BIT_ULL(32); +} + static int find_compression_threshold(struct drm_i915_private *dev_priv, struct drm_mm_node *node, unsigned int size, @@ -442,6 +450,8 @@ static int 
find_compression_threshold(struct drm_i915_private *dev_priv, else end = U64_MAX; + end = min(end, intel_fbc_cfb_base_max(dev_priv)); + /* HACK: This code depends on what we will do in *_enable_fbc. If that * code changes, this code needs to change as well. * @@ -1416,6 +1426,13 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) if (!HAS_FBC(dev_priv)) return 0; + /* + * Fbc is causing random underruns in CI execution on TGL platforms. + * Disabling the same while the problem is being debugged and analyzed. + */ + if (IS_TIGERLAKE(dev_priv)) + return 0; + if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) return 1; diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 2979ed2588eb93ad9aa6dcc02c1c433bb9c7cab5..d898b370d7a4faa5f9968fee3dd744de69bc7f18 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -232,6 +232,8 @@ static void frontbuffer_release(struct kref *ref) RCU_INIT_POINTER(obj->frontbuffer, NULL); spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock); + i915_active_fini(&front->write); + i915_gem_object_put(obj); kfree_rcu(front, rcu); } diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 89a4d294822d5528da26cebfd4e6893ba41c0d02..1a0d49af2a0862791f214b3912949e61d451af1a 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -336,8 +336,10 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, /* Fill up the empty slots in sha_text and write it out */ sha_empty = sizeof(sha_text) - sha_leftovers; - for (j = 0; j < sha_empty; j++) - sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8); + for (j = 0; j < sha_empty; j++) { + u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8); + sha_text |= ksv[j] << off; + } ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) @@ -435,7 +437,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, /* Write 32 bits of text */ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); - sha_text |= bstatus[0] << 24 | bstatus[1] << 16; + sha_text |= bstatus[0] << 8 | bstatus[1]; ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) return ret; @@ -450,17 +452,29 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, return ret; sha_idx += sizeof(sha_text); } + + /* + * Terminate the SHA-1 stream by hand. For the other leftover + * cases this is appended by the hardware. 
+ */ + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_32); + sha_text = DRM_HDCP_SHA1_TERMINATOR << 24; + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + return ret; + sha_idx += sizeof(sha_text); } else if (sha_leftovers == 3) { - /* Write 32 bits of text */ + /* Write 32 bits of text (filled from LSB) */ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); - sha_text |= bstatus[0] << 24; + sha_text |= bstatus[0]; ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) return ret; sha_idx += sizeof(sha_text); - /* Write 8 bits of text, 24 bits of M0 */ + /* Write 8 bits of text (filled from LSB), 24 bits of M0 */ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); ret = intel_write_sha_text(dev_priv, bstatus[1]); @@ -781,6 +795,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector) struct intel_hdcp *hdcp = &connector->hdcp; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; + u32 repeater_ctl; int ret; drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n", @@ -796,6 +811,11 @@ static int _intel_hdcp_disable(struct intel_connector *connector) return -ETIMEDOUT; } + repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, + port); + intel_de_write(dev_priv, HDCP_REP_CTL, + intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl); + ret = hdcp->shim->toggle_signalling(dig_port, false); if (ret) { drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n"); diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index b781bf46964437179aff89226d4c30633e528546..dc1b35559afdf863669319c8a6f0e026c91757c0 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -571,7 +571,7 @@ bool lspcon_init(struct intel_digital_port *dig_port) return false; } - if (!intel_dp_read_dpcd(dp)) { + if (drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd) != 0) { DRM_ERROR("LSPCON DPCD read failed\n"); return false; } diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index bbde3b12c3113cc83885bdc18851bbe0fba5979d..4072d7062efd656dd9b75651fa629ad70408825b 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -229,7 +229,7 @@ int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, case DRM_MODE_SCALE_NONE: WARN_ON(adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w); WARN_ON(adjusted_mode->crtc_vdisplay != crtc_state->pipe_src_h); - /* fall through */ + fallthrough; case DRM_MODE_SCALE_FULLSCREEN: x = y = 0; width = adjusted_mode->crtc_hdisplay; diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index bf9e320c547da67ab937b797fbb4fb16eda67787..2b004ee9619cd4f4d0c3fd082bc769f52504b669 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -553,6 +553,22 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) val |= EDP_PSR2_FAST_WAKE(7); } + if (dev_priv->psr.psr2_sel_fetch_enabled) { + /* WA 1408330847 */ + if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || + IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)) + intel_de_rmw(dev_priv, CHICKEN_PAR1_1, + DIS_RAM_BYPASS_PSR2_MAN_TRACK, + DIS_RAM_BYPASS_PSR2_MAN_TRACK); + + intel_de_write(dev_priv, + PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), + PSR2_MAN_TRK_CTL_ENABLE); + } else if (HAS_PSR2_SEL_FETCH(dev_priv)) 
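The HDCP V' validation hunks above fix three related bugs in the SHA-1 message packing: leftover bytes already sitting in sha_text shift the fill offset (hence the new "- sha_leftovers" term), the bstatus bytes are packed from the LSB up instead of the MSB down, and for the sha_leftovers == 2 case the 0x80 end-of-message marker (DRM_HDCP_SHA1_TERMINATOR) must be written by hand because the hardware only appends it for the other alignments. A worked example of the corrected offset, taking sha_leftovers == 1:

	sha_empty = sizeof(sha_text) - sha_leftovers;	/* 4 - 1 = 3 bytes to fill */
	for (j = 0; j < sha_empty; j++) {
		/* offsets: (4-0-1-1)*8 = 16, then 8, then 0 */
		u8 off = (sizeof(sha_text) - j - 1 - sha_leftovers) * 8;
		sha_text |= ksv[j] << off;
	}

Without the "- sha_leftovers" term, the first KSV byte would be shifted by 24 and land on top of the leftover byte already held in sha_text.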
{ + intel_de_write(dev_priv, + PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0); + } + /* * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is * recommending keep this bit unset while PSR2 is enabled. @@ -663,6 +679,38 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines; } +static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) +{ + struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_plane_state *plane_state; + struct intel_plane *plane; + int i; + + if (!dev_priv->params.enable_psr2_sel_fetch) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 sel fetch not enabled, disabled by parameter\n"); + return false; + } + + if (crtc_state->uapi.async_flip) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 sel fetch not enabled, async flip enabled\n"); + return false; + } + + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 sel fetch not enabled, plane rotated\n"); + return false; + } + } + + return crtc_state->enable_psr2_sel_fetch = true; +} + static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { @@ -732,22 +780,17 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } - /* - * Some platforms lack PSR2 HW tracking and instead require manual - * tracking by software. In this case, the driver is required to track - * the areas that need updates and program hardware to send selective - * updates. - * - * So until the software tracking is implemented, PSR2 needs to be - * disabled for platforms without PSR2 HW tracking. - */ - if (!HAS_PSR_HW_TRACKING(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, - "No PSR2 HW tracking in the platform\n"); - return false; + if (HAS_PSR2_SEL_FETCH(dev_priv)) { + if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && + !HAS_PSR_HW_TRACKING(dev_priv)) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, selective fetch not valid and no HW tracking available\n"); + return false; + } } - if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) { + if (!crtc_state->enable_psr2_sel_fetch && + (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", crtc_hdisplay, crtc_vdisplay, @@ -898,6 +941,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, val |= EXITLINE_ENABLE; intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val); } + + if (HAS_PSR_HW_TRACKING(dev_priv)) + intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, + dev_priv->psr.psr2_sel_fetch_enabled ? 
+ IGNORE_PSR2_HW_TRACKING : 0); } static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, @@ -919,6 +967,7 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, /* DC5/DC6 requires at least 6 idle frames */ val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6); dev_priv->psr.dc3co_exit_delay = val; + dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch; /* * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR @@ -1058,6 +1107,13 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) psr_status_mask, 2000)) drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n"); + /* WA 1408330847 */ + if (dev_priv->psr.psr2_sel_fetch_enabled && + (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || + IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))) + intel_de_rmw(dev_priv, CHICKEN_PAR1_1, + DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0); + /* Disable PSR on Sink */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); @@ -1115,6 +1171,32 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) intel_psr_exit(dev_priv); } +void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct i915_psr *psr = &dev_priv->psr; + + if (!HAS_PSR2_SEL_FETCH(dev_priv) || + !crtc_state->enable_psr2_sel_fetch) + return; + + intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder), + crtc_state->psr2_man_track_ctl); +} + +void intel_psr2_sel_fetch_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + if (!crtc_state->enable_psr2_sel_fetch) + return; + + crtc_state->psr2_man_track_ctl = PSR2_MAN_TRK_CTL_ENABLE | + PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; +} + /** * intel_psr_update - Update PSR state * @intel_dp: Intel DP diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index b4515186d5f46cc4e4e024e2f9a734e4601db603..6a83c8e682e6dbe819782f6aec3aca0a4bac1ef7 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -13,6 +13,8 @@ struct drm_connector_state; struct drm_i915_private; struct intel_crtc_state; struct intel_dp; +struct intel_crtc; +struct intel_atomic_state; #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support) void intel_psr_init_dpcd(struct intel_dp *intel_dp); @@ -43,5 +45,8 @@ void intel_psr_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state); void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp); +void intel_psr2_sel_fetch_update(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state); #endif /* __INTEL_PSR_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 2da4388e15403d00d8b04cea2c86810e7bbddd7b..5e9fb349c829266b662516d3371b47f1653f294b 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -1531,7 +1531,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state, default: drm_WARN(&dev_priv->drm, 1, "unknown pixel multiplier specified\n"); - /* fall through */ + fallthrough; case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; 
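These intel_psr.c hunks are the first cut of PSR2 software ("selective fetch", or manual) tracking: platforms lacking PSR2 HW tracking no longer lose PSR2 outright, because the driver can program PSR2_MAN_TRK_CTL itself on every commit. For now intel_psr2_sel_fetch_update() does not compute dirty regions and simply requests a full-frame fetch:

	/* First cut: no damage tracking yet, always fetch the whole frame */
	crtc_state->psr2_man_track_ctl = PSR2_MAN_TRK_CTL_ENABLE |
		PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;

Eligibility is gated conservatively (the enable_psr2_sel_fetch module parameter, no async flips, no rotated planes), and WA 1408330847 disables a RAM bypass on TGL and RKL A0 steppings while manual tracking is in use.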
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; @@ -2549,19 +2549,19 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) switch (sdvo->controlled_output) { case SDVO_OUTPUT_LVDS1: mask |= SDVO_OUTPUT_LVDS1; - /* fall through */ + fallthrough; case SDVO_OUTPUT_LVDS0: mask |= SDVO_OUTPUT_LVDS0; - /* fall through */ + fallthrough; case SDVO_OUTPUT_TMDS1: mask |= SDVO_OUTPUT_TMDS1; - /* fall through */ + fallthrough; case SDVO_OUTPUT_TMDS0: mask |= SDVO_OUTPUT_TMDS0; - /* fall through */ + fallthrough; case SDVO_OUTPUT_RGB1: mask |= SDVO_OUTPUT_RGB1; - /* fall through */ + fallthrough; case SDVO_OUTPUT_RGB0: mask |= SDVO_OUTPUT_RGB0; break; diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index d03860fef2d7908db8e2500ac56e66f495787b5e..6b72223981becf2ef02f2decd49c2b090def9a6c 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -2147,7 +2147,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_RGB565: if (INTEL_GEN(dev_priv) >= 11) break; - /* fall through */ + fallthrough; case DRM_FORMAT_C8: case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_XBGR16161616F: @@ -2702,7 +2702,7 @@ static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED) return true; - /* fall through */ + fallthrough; default: return false; } @@ -2733,7 +2733,7 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED) return true; - /* fall through */ + fallthrough; default: return false; } @@ -2768,7 +2768,7 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane, if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED) return true; - /* fall through */ + fallthrough; default: return false; } @@ -2801,7 +2801,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_ABGR8888: if (is_ccs_modifier(modifier)) return true; - /* fall through */ + fallthrough; case DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: @@ -2819,7 +2819,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_XVYU2101010: if (modifier == I915_FORMAT_MOD_Yf_TILED) return true; - /* fall through */ + fallthrough; case DRM_FORMAT_C8: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: @@ -2834,7 +2834,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, modifier == I915_FORMAT_MOD_X_TILED || modifier == I915_FORMAT_MOD_Y_TILED) return true; - /* fall through */ + fallthrough; default: return false; } @@ -2843,8 +2843,9 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv, enum plane_id plane_id) { - /* Wa_14010477008:tgl[a0..c0] */ - if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0)) + /* Wa_14010477008:tgl[a0..c0],rkl[all] */ + if (IS_ROCKETLAKE(dev_priv) || + IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0)) return false; return plane_id < PLANE_SPRITE4; @@ -2860,7 +2861,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id)) return false; - /* fall through */ + fallthrough; case DRM_FORMAT_MOD_LINEAR: case 
I915_FORMAT_MOD_X_TILED: case I915_FORMAT_MOD_Y_TILED: @@ -2877,7 +2878,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_ABGR8888: if (is_ccs_modifier(modifier)) return true; - /* fall through */ + fallthrough; case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -2889,7 +2890,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_P016: if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS) return true; - /* fall through */ + fallthrough; case DRM_FORMAT_RGB565: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: @@ -2910,7 +2911,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, modifier == I915_FORMAT_MOD_X_TILED || modifier == I915_FORMAT_MOD_Y_TILED) return true; - /* fall through */ + fallthrough; default: return false; } diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 5b5dc86a5737584fabec9c3312a21e7bea020200..8f67aef18b2da34f36eed452546ca042e786bcf9 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -159,7 +159,7 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) switch (lane_mask) { default: MISSING_CASE(lane_mask); - /* fall-through */ + fallthrough; case 0x1: case 0x2: case 0x4: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index 278664f831e78fe6781777eec1a2c235d5c77feb..272cf3ea68d56918afa2fc3a7e2bc4a86d9e6c3f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -32,12 +32,13 @@ static void vma_clear_pages(struct i915_vma *vma) vma->pages = NULL; } -static int vma_bind(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) +static void vma_bind(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) { - return vm->vma_ops.bind_vma(vm, vma, cache_level, flags); + vm->vma_ops.bind_vma(vm, stash, vma, cache_level, flags); } static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma) @@ -157,6 +158,7 @@ static void clear_pages_worker(struct work_struct *work) struct clear_pages_work *w = container_of(work, typeof(*w), work); struct drm_i915_gem_object *obj = w->sleeve->vma->obj; struct i915_vma *vma = w->sleeve->vma; + struct i915_gem_ww_ctx ww; struct i915_request *rq; struct i915_vma *batch; int err = w->dma.error; @@ -172,17 +174,20 @@ static void clear_pages_worker(struct work_struct *work) obj->read_domains = I915_GEM_GPU_DOMAINS; obj->write_domain = 0; - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (unlikely(err)) + i915_gem_ww_ctx_init(&ww, false); + intel_engine_pm_get(w->ce->engine); +retry: + err = intel_context_pin_ww(w->ce, &ww); + if (err) goto out_signal; - batch = intel_emit_vma_fill_blt(w->ce, vma, w->value); + batch = intel_emit_vma_fill_blt(w->ce, vma, &ww, w->value); if (IS_ERR(batch)) { err = PTR_ERR(batch); - goto out_unpin; + goto out_ctx; } - rq = intel_context_create_request(w->ce); + rq = i915_request_create(w->ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_batch; @@ -224,9 +229,19 @@ static void clear_pages_worker(struct work_struct *work) i915_request_add(rq); out_batch: intel_emit_vma_release(w->ce, batch); -out_unpin: - i915_vma_unpin(vma); +out_ctx: + intel_context_unpin(w->ce); out_signal: + if (err == -EDEADLK) { + err = 
i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + + i915_vma_unpin(w->sleeve->vma); + intel_engine_pm_put(w->ce->engine); + if (unlikely(err)) { dma_fence_set_error(&w->dma, err); dma_fence_signal(&w->dma); @@ -234,6 +249,44 @@ static void clear_pages_worker(struct work_struct *work) } } +static int pin_wait_clear_pages_work(struct clear_pages_work *w, + struct intel_context *ce) +{ + struct i915_vma *vma = w->sleeve->vma; + struct i915_gem_ww_ctx ww; + int err; + + i915_gem_ww_ctx_init(&ww, false); +retry: + err = i915_gem_object_lock(vma->obj, &ww); + if (err) + goto out; + + err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER); + if (unlikely(err)) + goto out; + + err = i915_sw_fence_await_reservation(&w->wait, + vma->obj->base.resv, NULL, + true, 0, I915_FENCE_GFP); + if (err) + goto err_unpin_vma; + + dma_resv_add_excl_fence(vma->obj->base.resv, &w->dma); + +err_unpin_vma: + if (err) + i915_vma_unpin(vma); +out: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + return err; +} + static int __i915_sw_fence_call clear_pages_work_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) @@ -287,17 +340,9 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj, dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0); i915_sw_fence_init(&work->wait, clear_pages_work_notify); - i915_gem_object_lock(obj); - err = i915_sw_fence_await_reservation(&work->wait, - obj->base.resv, NULL, true, 0, - I915_FENCE_GFP); - if (err < 0) { + err = pin_wait_clear_pages_work(work, ce); + if (err < 0) dma_fence_set_error(&work->dma, err); - } else { - dma_resv_add_excl_fence(obj->base.resv, &work->dma); - err = 0; - } - i915_gem_object_unlock(obj); dma_fence_get(&work->dma); i915_sw_fence_commit(&work->wait); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index d0bdb6d447ed4a98af2b94ffa1a59b2135314813..cf5ecbde9e0695c21fb121b8e93fecc237e49f69 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -439,29 +439,36 @@ static bool __cancel_engine(struct intel_engine_cs *engine) return __reset_engine(engine); } -static struct intel_engine_cs *__active_engine(struct i915_request *rq) +static bool +__active_engine(struct i915_request *rq, struct intel_engine_cs **active) { struct intel_engine_cs *engine, *locked; + bool ret = false; /* * Serialise with __i915_request_submit() so that it sees * is-banned?, or we know the request is already inflight. + * + * Note that rq->engine is unstable, and so we double + * check that we have acquired the lock on the final engine. 
*/ locked = READ_ONCE(rq->engine); spin_lock_irq(&locked->active.lock); while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { spin_unlock(&locked->active.lock); - spin_lock(&engine->active.lock); locked = engine; + spin_lock(&locked->active.lock); } - engine = NULL; - if (i915_request_is_active(rq) && rq->fence.error != -EIO) - engine = rq->engine; + if (!i915_request_completed(rq)) { + if (i915_request_is_active(rq) && rq->fence.error != -EIO) + *active = locked; + ret = true; + } spin_unlock_irq(&locked->active.lock); - return engine; + return ret; } static struct intel_engine_cs *active_engine(struct intel_context *ce) @@ -472,17 +479,16 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce) if (!ce->timeline) return NULL; - mutex_lock(&ce->timeline->mutex); - list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { - if (i915_request_completed(rq)) - break; + rcu_read_lock(); + list_for_each_entry_rcu(rq, &ce->timeline->requests, link) { + if (i915_request_is_active(rq) && i915_request_completed(rq)) + continue; /* Check with the backend if the request is inflight */ - engine = __active_engine(rq); - if (engine) + if (__active_engine(rq, &engine)) break; } - mutex_unlock(&ce->timeline->mutex); + rcu_read_unlock(); return engine; } @@ -713,6 +719,7 @@ __create_context(struct drm_i915_private *i915) ctx->i915 = i915; ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); mutex_init(&ctx->mutex); + INIT_LIST_HEAD(&ctx->link); spin_lock_init(&ctx->stale.lock); INIT_LIST_HEAD(&ctx->stale.engines); @@ -740,10 +747,6 @@ __create_context(struct drm_i915_private *i915) for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; - spin_lock(&i915->gem.contexts.lock); - list_add_tail(&ctx->link, &i915->gem.contexts.list); - spin_unlock(&i915->gem.contexts.lock); - return ctx; err_free: @@ -889,7 +892,7 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { struct intel_timeline *timeline; - timeline = intel_timeline_create(&i915->gt, NULL); + timeline = intel_timeline_create(&i915->gt); if (IS_ERR(timeline)) { context_close(ctx); return ERR_CAST(timeline); @@ -931,6 +934,7 @@ static int gem_context_register(struct i915_gem_context *ctx, struct drm_i915_file_private *fpriv, u32 *id) { + struct drm_i915_private *i915 = ctx->i915; struct i915_address_space *vm; int ret; @@ -949,8 +953,16 @@ static int gem_context_register(struct i915_gem_context *ctx, /* And finally expose ourselves to userspace via the idr */ ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL); if (ret) - put_pid(fetch_and_zero(&ctx->pid)); + goto err_pid; + + spin_lock(&i915->gem.contexts.lock); + list_add_tail(&ctx->link, &i915->gem.contexts.list); + spin_unlock(&i915->gem.contexts.lock); + return 0; + +err_pid: + put_pid(fetch_and_zero(&ctx->pid)); return ret; } @@ -1094,6 +1106,7 @@ I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); static int context_barrier_task(struct i915_gem_context *ctx, intel_engine_mask_t engines, bool (*skip)(struct intel_context *ce, void *data), + int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data), int (*emit)(struct i915_request *rq, void *data), void (*task)(void *data), void *data) @@ -1101,6 +1114,7 @@ static int context_barrier_task(struct i915_gem_context *ctx, struct context_barrier_task *cb; struct i915_gem_engines_iter it; struct 
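The __active_engine() rework above illustrates a common i915 locking idiom: rq->engine can be rewritten concurrently (for example by the virtual-engine scheduler), so the code locks its best guess and re-reads until the pointer is stable under the lock. The rework also reports the engine through an out-parameter, making "request known settled" and "request still in flight on engine X" distinct answers. The stabilizing loop:

	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->active.lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->active.lock);
		locked = engine;	/* chase the request to its new engine */
		spin_lock(&locked->active.lock);
	}

With that in place, active_engine() can walk the timeline under rcu_read_lock() instead of taking the timeline mutex.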
i915_gem_engines *e; + struct i915_gem_ww_ctx ww; struct intel_context *ce; int err = 0; @@ -1138,10 +1152,21 @@ static int context_barrier_task(struct i915_gem_context *ctx, if (skip && skip(ce, data)) continue; - rq = intel_context_create_request(ce); + i915_gem_ww_ctx_init(&ww, true); +retry: + err = intel_context_pin_ww(ce, &ww); + if (err) + goto err; + + if (pin) + err = pin(ce, &ww, data); + if (err) + goto err_unpin; + + rq = i915_request_create(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); - break; + goto err_unpin; } err = 0; @@ -1151,6 +1176,16 @@ static int context_barrier_task(struct i915_gem_context *ctx, err = i915_active_add_request(&cb->base, rq); i915_request_add(rq); +err_unpin: + intel_context_unpin(ce); +err: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + if (err) break; } @@ -1206,6 +1241,17 @@ static void set_ppgtt_barrier(void *data) i915_vm_close(old); } +static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data) +{ + struct i915_address_space *vm = ce->vm; + + if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915)) + /* ppGTT is not part of the legacy context image */ + return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww); + + return 0; +} + static int emit_ppgtt_update(struct i915_request *rq, void *data) { struct i915_address_space *vm = rq->context->vm; @@ -1262,20 +1308,10 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data) static bool skip_ppgtt_update(struct intel_context *ce, void *data) { - if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) - return true; - if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915)) - return false; - - if (!atomic_read(&ce->pin_count)) - return true; - - /* ppGTT is not part of the legacy context image */ - if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm))) - return true; - - return false; + return !ce->state; + else + return !atomic_read(&ce->pin_count); } static int set_ppgtt(struct drm_i915_file_private *file_priv, @@ -1326,6 +1362,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, */ err = context_barrier_task(ctx, ALL_ENGINES, skip_ppgtt_update, + pin_ppgtt_update, emit_ppgtt_update, set_ppgtt_barrier, old); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 2679380159fc74f29a1ecf1e991276e55e2dd7db..27fddc22a7c6fef5d07b8a6fcb9299c70971d797 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -128,7 +128,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire if (err) return err; - err = i915_gem_object_lock_interruptible(obj); + err = i915_gem_object_lock_interruptible(obj, NULL); if (err) goto out; @@ -149,7 +149,7 @@ static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direct if (err) return err; - err = i915_gem_object_lock_interruptible(obj); + err = i915_gem_object_lock_interruptible(obj, NULL); if (err) goto out; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 7f76fc68f498aa872a208166023ddfb4f98cf347..7c90a63c273d8d52e2148f45bee13e94d4e27e6e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -32,11 +32,17 @@ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) if (!i915_gem_object_is_framebuffer(obj)) return; - i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); __i915_gem_object_flush_for_display(obj); 
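The conversions in these gem files all follow one template, the ww (wound/wait) acquire pattern: initialize an acquire context, take locks and pins under it, and on -EDEADLK back off (dropping every lock taken under the context) and retry from the top. The skeleton, as used by clear_pages_worker(), pin_wait_clear_pages_work() and context_barrier_task() above:

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* true: interruptible waits */
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	/* ... pin vmas / contexts, emit requests ... */
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;	/* all ww locks dropped; start over */
	}
	i915_gem_ww_ctx_fini(&ww);

Because backoff releases everything acquired under the context, any state built up before the -EDEADLK must be safe to rebuild on the retry pass.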
i915_gem_object_unlock(obj); } +void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj) +{ + if (i915_gem_object_is_framebuffer(obj)) + __i915_gem_object_flush_for_display(obj); +} + /** * Moves a single object to the WC read, and possibly write domain. * @obj: object to act on @@ -197,18 +203,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, if (ret) return ret; - ret = i915_gem_object_lock_interruptible(obj); - if (ret) - return ret; - /* Always invalidate stale cachelines */ if (obj->cache_level != cache_level) { i915_gem_object_set_cache_coherency(obj, cache_level); obj->cache_dirty = true; } - i915_gem_object_unlock(obj); - /* The cache-level will be applied when each vma is rebound. */ return i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE | @@ -293,7 +293,12 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, goto out; } + ret = i915_gem_object_lock_interruptible(obj, NULL); + if (ret) + goto out; + ret = i915_gem_object_set_cache_level(obj, level); + i915_gem_object_unlock(obj); out: i915_gem_object_put(obj); @@ -313,6 +318,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, unsigned int flags) { struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_gem_ww_ctx ww; struct i915_vma *vma; int ret; @@ -320,6 +326,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) return ERR_PTR(-EINVAL); + i915_gem_ww_ctx_init(&ww, true); +retry: + ret = i915_gem_object_lock(obj, &ww); + if (ret) + goto err; /* * The display engine is not coherent with the LLC cache on gen6. As * a result, we make sure that the pinning that is about to occur is @@ -334,7 +345,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, HAS_WT(i915) ? 
I915_CACHE_WT : I915_CACHE_NONE); if (ret) - return ERR_PTR(ret); + goto err; /* * As the user may map the buffer once pinned in the display plane @@ -347,18 +358,31 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, vma = ERR_PTR(-ENOSPC); if ((flags & PIN_MAPPABLE) == 0 && (!view || view->type == I915_GGTT_VIEW_NORMAL)) - vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, - flags | - PIN_MAPPABLE | - PIN_NONBLOCK); - if (IS_ERR(vma)) - vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); - if (IS_ERR(vma)) - return vma; + vma = i915_gem_object_ggtt_pin_ww(obj, &ww, view, 0, alignment, + flags | PIN_MAPPABLE | + PIN_NONBLOCK); + if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) + vma = i915_gem_object_ggtt_pin_ww(obj, &ww, view, 0, + alignment, flags); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err; + } vma->display_alignment = max_t(u64, vma->display_alignment, alignment); - i915_gem_object_flush_if_display(obj); + i915_gem_object_flush_if_display_locked(obj); + +err: + if (ret == -EDEADLK) { + ret = i915_gem_ww_ctx_backoff(&ww); + if (!ret) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + + if (ret) + return ERR_PTR(ret); return vma; } @@ -536,7 +560,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (err) goto out; - err = i915_gem_object_lock_interruptible(obj); + err = i915_gem_object_lock_interruptible(obj, NULL); if (err) goto out_unpin; @@ -576,19 +600,17 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, if (!i915_gem_object_has_struct_page(obj)) return -ENODEV; - ret = i915_gem_object_lock_interruptible(obj); - if (ret) - return ret; + assert_object_held(obj); ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); if (ret) - goto err_unlock; + return ret; ret = i915_gem_object_pin_pages(obj); if (ret) - goto err_unlock; + return ret; if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ || !static_cpu_has(X86_FEATURE_CLFLUSH)) { @@ -616,8 +638,6 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, err_unpin: i915_gem_object_unpin_pages(obj); -err_unlock: - i915_gem_object_unlock(obj); return ret; } @@ -630,20 +650,18 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, if (!i915_gem_object_has_struct_page(obj)) return -ENODEV; - ret = i915_gem_object_lock_interruptible(obj); - if (ret) - return ret; + assert_object_held(obj); ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT); if (ret) - goto err_unlock; + return ret; ret = i915_gem_object_pin_pages(obj); if (ret) - goto err_unlock; + return ret; if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE || !static_cpu_has(X86_FEATURE_CLFLUSH)) { @@ -680,7 +698,5 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, err_unpin: i915_gem_object_unpin_pages(obj); -err_unlock: - i915_gem_object_unlock(obj); return ret; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 6b4ec66cb558dca8b893d8b3d13d7622a5ce1073..5509946f1a1da7005220bc7a5fbcd11ea39d2d06 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -26,6 +26,7 @@ #include "i915_gem_ioctls.h" #include "i915_sw_fence_work.h" #include "i915_trace.h" +#include "i915_user_extensions.h" struct eb_vma { struct i915_vma *vma; @@ -40,9 +41,11 @@ struct eb_vma { u32 handle; }; -struct eb_vma_array { - struct kref kref; - struct eb_vma vma[]; +enum { + FORCE_CPU_RELOC 
= 1, + FORCE_GTT_RELOC, + FORCE_GPU_RELOC, +#define DBG_FORCE_RELOC 0 /* choose one of the above! */ }; #define __EXEC_OBJECT_HAS_PIN BIT(31) @@ -50,9 +53,11 @@ struct eb_vma_array { #define __EXEC_OBJECT_NEEDS_MAP BIT(29) #define __EXEC_OBJECT_NEEDS_BIAS BIT(28) #define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */ +#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE) #define __EXEC_HAS_RELOC BIT(31) -#define __EXEC_INTERNAL_FLAGS (~0u << 31) +#define __EXEC_ENGINE_PINNED BIT(30) +#define __EXEC_INTERNAL_FLAGS (~0u << 30) #define UPDATE PIN_OFFSET_FIXED #define BATCH_OFFSET_BIAS (256*1024) @@ -222,6 +227,13 @@ struct eb_vma_array { * the batchbuffer in trusted mode, otherwise the ioctl is rejected. */ +struct eb_fence { + struct drm_syncobj *syncobj; /* Use with ptr_mask_bits() */ + struct dma_fence *dma_fence; + u64 value; + struct dma_fence_chain *chain_fence; +}; + struct i915_execbuffer { struct drm_i915_private *i915; /** i915 backpointer */ struct drm_file *file; /** per-file lookup tables and limits */ @@ -246,6 +258,8 @@ struct i915_execbuffer { /** list of vma that have execobj.relocation_count */ struct list_head relocs; + struct i915_gem_ww_ctx ww; + /** * Track the most recently used object for relocations, as we * frequently have to perform multiple relocations within the same @@ -253,25 +267,30 @@ struct i915_execbuffer { */ struct reloc_cache { struct drm_mm_node node; /** temporary GTT binding */ + unsigned long vaddr; /** Current kmap address */ + unsigned long page; /** Currently mapped page index */ unsigned int gen; /** Cached value of INTEL_GEN */ bool use_64bit_reloc : 1; bool has_llc : 1; bool has_fence : 1; bool needs_unfenced : 1; - struct i915_vma *target; struct i915_request *rq; - struct i915_vma *rq_vma; u32 *rq_cmd; unsigned int rq_size; + struct intel_gt_buffer_pool_node *pool; } reloc_cache; + struct intel_gt_buffer_pool_node *reloc_pool; /** relocation pool for -EDEADLK handling */ + struct intel_context *reloc_context; + u64 invalid_flags; /** Set of execobj.flags that are invalid */ u32 context_flags; /** Set of execobj.flags to insert from the ctx */ u32 batch_start_offset; /** Location within object of batch */ u32 batch_len; /** Length of batch within object */ u32 batch_flags; /** Flags composed for emit_bb_start() */ + struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */ /** * Indicate either the size of the hastable used to resolve @@ -280,9 +299,16 @@ struct i915_execbuffer { */ int lut_size; struct hlist_head *buckets; /** ht for relocation handles */ - struct eb_vma_array *array; + + struct eb_fence *fences; + unsigned long num_fences; }; +static int eb_parse(struct i915_execbuffer *eb); +static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, + bool throttle); +static void eb_unpin_engine(struct i915_execbuffer *eb); + static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) { return intel_engine_requires_cmd_parser(eb->engine) || @@ -290,62 +316,8 @@ static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) eb->args->batch_len); } -static struct eb_vma_array *eb_vma_array_create(unsigned int count) -{ - struct eb_vma_array *arr; - - arr = kvmalloc(struct_size(arr, vma, count), GFP_KERNEL | __GFP_NOWARN); - if (!arr) - return NULL; - - kref_init(&arr->kref); - arr->vma[0].vma = NULL; - - return arr; -} - -static inline void eb_unreserve_vma(struct eb_vma *ev) -{ - struct i915_vma *vma = ev->vma; - - if (unlikely(ev->flags & 
__EXEC_OBJECT_HAS_FENCE)) - __i915_vma_unpin_fence(vma); - - if (ev->flags & __EXEC_OBJECT_HAS_PIN) - __i915_vma_unpin(vma); - - ev->flags &= ~(__EXEC_OBJECT_HAS_PIN | - __EXEC_OBJECT_HAS_FENCE); -} - -static void eb_vma_array_destroy(struct kref *kref) -{ - struct eb_vma_array *arr = container_of(kref, typeof(*arr), kref); - struct eb_vma *ev = arr->vma; - - while (ev->vma) { - eb_unreserve_vma(ev); - i915_vma_put(ev->vma); - ev++; - } - - kvfree(arr); -} - -static void eb_vma_array_put(struct eb_vma_array *arr) -{ - kref_put(&arr->kref, eb_vma_array_destroy); -} - static int eb_create(struct i915_execbuffer *eb) { - /* Allocate an extra slot for use by the command parser + sentinel */ - eb->array = eb_vma_array_create(eb->buffer_count + 2); - if (!eb->array) - return -ENOMEM; - - eb->vma = eb->array->vma; - if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { unsigned int size = 1 + ilog2(eb->buffer_count); @@ -379,10 +351,8 @@ static int eb_create(struct i915_execbuffer *eb) break; } while (--size); - if (unlikely(!size)) { - eb_vma_array_put(eb->array); + if (unlikely(!size)) return -ENOMEM; - } eb->lut_size = size; } else { @@ -466,16 +436,17 @@ eb_pin_vma(struct i915_execbuffer *eb, pin_flags |= PIN_GLOBAL; /* Attempt to reuse the current location if available */ - if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) { + /* TODO: Add -EDEADLK handling here */ + if (unlikely(i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags))) { if (entry->flags & EXEC_OBJECT_PINNED) return false; /* Failing that pick any _free_ space if suitable */ - if (unlikely(i915_vma_pin(vma, - entry->pad_to_size, - entry->alignment, - eb_pin_flags(entry, ev->flags) | - PIN_USER | PIN_NOEVICT))) + if (unlikely(i915_vma_pin_ww(vma, &eb->ww, + entry->pad_to_size, + entry->alignment, + eb_pin_flags(entry, ev->flags) | + PIN_USER | PIN_NOEVICT))) return false; } @@ -493,6 +464,19 @@ eb_pin_vma(struct i915_execbuffer *eb, return !eb_vma_misplaced(entry, vma, ev->flags); } +static inline void +eb_unreserve_vma(struct eb_vma *ev) +{ + if (!(ev->flags & __EXEC_OBJECT_HAS_PIN)) + return; + + if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE)) + __i915_vma_unpin_fence(ev->vma); + + __i915_vma_unpin(ev->vma); + ev->flags &= ~__EXEC_OBJECT_RESERVED; +} + static int eb_validate_vma(struct i915_execbuffer *eb, struct drm_i915_gem_exec_object2 *entry, @@ -584,19 +568,26 @@ eb_add_vma(struct i915_execbuffer *eb, eb->batch = ev; } +} - if (eb_pin_vma(eb, entry, ev)) { - if (entry->offset != vma->node.start) { - entry->offset = vma->node.start | UPDATE; - eb->args->flags |= __EXEC_HAS_RELOC; - } - } else { - eb_unreserve_vma(ev); - list_add_tail(&ev->bind_link, &eb->unbound); - } +static inline int use_cpu_reloc(const struct reloc_cache *cache, + const struct drm_i915_gem_object *obj) +{ + if (!i915_gem_object_has_struct_page(obj)) + return false; + + if (DBG_FORCE_RELOC == FORCE_CPU_RELOC) + return true; + + if (DBG_FORCE_RELOC == FORCE_GTT_RELOC) + return false; + + return (cache->has_llc || + obj->cache_dirty || + obj->cache_level != I915_CACHE_NONE); } -static int eb_reserve_vma(const struct i915_execbuffer *eb, +static int eb_reserve_vma(struct i915_execbuffer *eb, struct eb_vma *ev, u64 pin_flags) { @@ -611,7 +602,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, return err; } - err = i915_vma_pin(vma, + err = i915_vma_pin_ww(vma, &eb->ww, entry->pad_to_size, entry->alignment, eb_pin_flags(entry, ev->flags) | pin_flags); if (err) @@ -661,10 +652,6 @@ static int eb_reserve(struct i915_execbuffer *eb) * This avoid 
unnecessary unbinding of later objects in order to make * room for the earlier objects *unless* we need to defragment. */ - - if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex)) - return -EINTR; - pass = 0; do { list_for_each_entry(ev, &eb->unbound, bind_link) { @@ -672,8 +659,8 @@ static int eb_reserve(struct i915_execbuffer *eb) if (err) break; } - if (!(err == -ENOSPC || err == -EAGAIN)) - break; + if (err != -ENOSPC) + return err; /* Resort *all* the objects into priority order */ INIT_LIST_HEAD(&eb->unbound); @@ -703,13 +690,6 @@ static int eb_reserve(struct i915_execbuffer *eb) } list_splice_tail(&last, &eb->unbound); - if (err == -EAGAIN) { - mutex_unlock(&eb->i915->drm.struct_mutex); - flush_workqueue(eb->i915->mm.userptr_wq); - mutex_lock(&eb->i915->drm.struct_mutex); - continue; - } - switch (pass++) { case 0: break; @@ -720,20 +700,15 @@ static int eb_reserve(struct i915_execbuffer *eb) err = i915_gem_evict_vm(eb->context->vm); mutex_unlock(&eb->context->vm->mutex); if (err) - goto unlock; + return err; break; default: - err = -ENOSPC; - goto unlock; + return -ENOSPC; } pin_flags = PIN_USER; } while (1); - -unlock: - mutex_unlock(&eb->i915->drm.struct_mutex); - return err; } static unsigned int eb_batch_index(const struct i915_execbuffer *eb) @@ -856,12 +831,12 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) static int eb_lookup_vmas(struct i915_execbuffer *eb) { + struct drm_i915_private *i915 = eb->i915; unsigned int batch = eb_batch_index(eb); unsigned int i; int err = 0; INIT_LIST_HEAD(&eb->relocs); - INIT_LIST_HEAD(&eb->unbound); for (i = 0; i < eb->buffer_count; i++) { struct i915_vma *vma; @@ -869,22 +844,83 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) vma = eb_lookup_vma(eb, eb->exec[i].handle); if (IS_ERR(vma)) { err = PTR_ERR(vma); - break; + goto err; } err = eb_validate_vma(eb, &eb->exec[i], vma); if (unlikely(err)) { i915_vma_put(vma); - break; + goto err; } eb_add_vma(eb, i, batch, vma); } + if (unlikely(eb->batch->flags & EXEC_OBJECT_WRITE)) { + drm_dbg(&i915->drm, + "Attempting to use self-modifying batch buffer\n"); + return -EINVAL; + } + + if (range_overflows_t(u64, + eb->batch_start_offset, eb->batch_len, + eb->batch->vma->size)) { + drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n"); + return -EINVAL; + } + + if (eb->batch_len == 0) + eb->batch_len = eb->batch->vma->size - eb->batch_start_offset; + + return 0; + +err: eb->vma[i].vma = NULL; return err; } +static int eb_validate_vmas(struct i915_execbuffer *eb) +{ + unsigned int i; + int err; + + INIT_LIST_HEAD(&eb->unbound); + + for (i = 0; i < eb->buffer_count; i++) { + struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; + struct eb_vma *ev = &eb->vma[i]; + struct i915_vma *vma = ev->vma; + + err = i915_gem_object_lock(vma->obj, &eb->ww); + if (err) + return err; + + if (eb_pin_vma(eb, entry, ev)) { + if (entry->offset != vma->node.start) { + entry->offset = vma->node.start | UPDATE; + eb->args->flags |= __EXEC_HAS_RELOC; + } + } else { + eb_unreserve_vma(ev); + + list_add_tail(&ev->bind_link, &eb->unbound); + if (drm_mm_node_allocated(&vma->node)) { + err = i915_vma_unbind(vma); + if (err) + return err; + } + } + + GEM_BUG_ON(drm_mm_node_allocated(&vma->node) && + eb_vma_misplaced(&eb->exec[i], vma, ev->flags)); + } + + if (!list_empty(&eb->unbound)) + return eb_reserve(eb); + + return 0; +} + static struct eb_vma * eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle) { @@ -905,13 +941,31 @@ eb_get_vma(const struct 
i915_execbuffer *eb, unsigned long handle) } } +static void eb_release_vmas(struct i915_execbuffer *eb, bool final) +{ + const unsigned int count = eb->buffer_count; + unsigned int i; + + for (i = 0; i < count; i++) { + struct eb_vma *ev = &eb->vma[i]; + struct i915_vma *vma = ev->vma; + + if (!vma) + break; + + eb_unreserve_vma(ev); + + if (final) + i915_vma_put(vma); + } + + eb_unpin_engine(eb); +} + static void eb_destroy(const struct i915_execbuffer *eb) { GEM_BUG_ON(eb->reloc_cache.rq); - if (eb->array) - eb_vma_array_put(eb->array); - if (eb->lut_size > 0) kfree(eb->buckets); } @@ -923,9 +977,19 @@ relocation_target(const struct drm_i915_gem_relocation_entry *reloc, return gen8_canonical_addr((int)reloc->delta + target->node.start); } +static void reloc_cache_clear(struct reloc_cache *cache) +{ + cache->rq = NULL; + cache->rq_cmd = NULL; + cache->pool = NULL; + cache->rq_size = 0; +} + static void reloc_cache_init(struct reloc_cache *cache, struct drm_i915_private *i915) { + cache->page = -1; + cache->vaddr = 0; /* Must be a variable in the struct to allow GCC to unroll. */ cache->gen = INTEL_GEN(i915); cache->has_llc = HAS_LLC(i915); @@ -933,120 +997,249 @@ static void reloc_cache_init(struct reloc_cache *cache, cache->has_fence = cache->gen < 4; cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; cache->node.flags = 0; - cache->rq = NULL; - cache->target = NULL; + reloc_cache_clear(cache); } -#define RELOC_TAIL 4 +static inline void *unmask_page(unsigned long p) +{ + return (void *)(uintptr_t)(p & PAGE_MASK); +} -static int reloc_gpu_chain(struct reloc_cache *cache) +static inline unsigned int unmask_flags(unsigned long p) { - struct intel_gt_buffer_pool_node *pool; - struct i915_request *rq = cache->rq; - struct i915_vma *batch; - u32 *cmd; - int err; + return p & ~PAGE_MASK; +} - pool = intel_gt_get_buffer_pool(rq->engine->gt, PAGE_SIZE); - if (IS_ERR(pool)) - return PTR_ERR(pool); +#define KMAP 0x4 /* after CLFLUSH_FLAGS */ - batch = i915_vma_instance(pool->obj, rq->context->vm, NULL); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto out_pool; - } +static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache) +{ + struct drm_i915_private *i915 = + container_of(cache, struct i915_execbuffer, reloc_cache)->i915; + return &i915->ggtt; +} - err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK); - if (err) - goto out_pool; +static void reloc_cache_put_pool(struct i915_execbuffer *eb, struct reloc_cache *cache) +{ + if (!cache->pool) + return; - GEM_BUG_ON(cache->rq_size + RELOC_TAIL > PAGE_SIZE / sizeof(u32)); - cmd = cache->rq_cmd + cache->rq_size; - *cmd++ = MI_ARB_CHECK; - if (cache->gen >= 8) - *cmd++ = MI_BATCH_BUFFER_START_GEN8; - else if (cache->gen >= 6) - *cmd++ = MI_BATCH_BUFFER_START; - else - *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; - *cmd++ = lower_32_bits(batch->node.start); - *cmd++ = upper_32_bits(batch->node.start); /* Always 0 for gen<8 */ - i915_gem_object_flush_map(cache->rq_vma->obj); - i915_gem_object_unpin_map(cache->rq_vma->obj); - cache->rq_vma = NULL; + /* + * This is a bit nasty, normally we keep objects locked until the end + * of execbuffer, but we already submit this, and have to unlock before + * dropping the reference. Fortunately we can only hold 1 pool node at + * a time, so this should be harmless. 
+ */ + i915_gem_ww_unlock_single(cache->pool->obj); + intel_gt_buffer_pool_put(cache->pool); + cache->pool = NULL; +} - err = intel_gt_buffer_pool_mark_active(pool, rq); - if (err == 0) { - i915_vma_lock(batch); - err = i915_request_await_object(rq, batch->obj, false); - if (err == 0) - err = i915_vma_move_to_active(batch, rq, 0); - i915_vma_unlock(batch); - } - i915_vma_unpin(batch); - if (err) - goto out_pool; +static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cache) +{ + struct drm_i915_gem_object *obj = cache->rq->batch->obj; - cmd = i915_gem_object_pin_map(batch->obj, - cache->has_llc ? - I915_MAP_FORCE_WB : - I915_MAP_FORCE_WC); - if (IS_ERR(cmd)) { - err = PTR_ERR(cmd); - goto out_pool; - } + GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32)); + cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END; - /* Return with batch mapping (cmd) still pinned */ - cache->rq_cmd = cmd; - cache->rq_size = 0; - cache->rq_vma = batch; + __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1)); + i915_gem_object_unpin_map(obj); -out_pool: - intel_gt_buffer_pool_put(pool); - return err; + intel_gt_chipset_flush(cache->rq->engine->gt); + + i915_request_add(cache->rq); + reloc_cache_put_pool(eb, cache); + reloc_cache_clear(cache); + + eb->reloc_pool = NULL; } -static unsigned int reloc_bb_flags(const struct reloc_cache *cache) +static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb) { - return cache->gen > 5 ? 0 : I915_DISPATCH_SECURE; + void *vaddr; + + if (cache->rq) + reloc_gpu_flush(eb, cache); + + if (!cache->vaddr) + return; + + vaddr = unmask_page(cache->vaddr); + if (cache->vaddr & KMAP) { + struct drm_i915_gem_object *obj = + (struct drm_i915_gem_object *)cache->node.mm; + if (cache->vaddr & CLFLUSH_AFTER) + mb(); + + kunmap_atomic(vaddr); + i915_gem_object_finish_access(obj); + } else { + struct i915_ggtt *ggtt = cache_to_ggtt(cache); + + intel_gt_flush_ggtt_writes(ggtt->vm.gt); + io_mapping_unmap_atomic((void __iomem *)vaddr); + + if (drm_mm_node_allocated(&cache->node)) { + ggtt->vm.clear_range(&ggtt->vm, + cache->node.start, + cache->node.size); + mutex_lock(&ggtt->vm.mutex); + drm_mm_remove_node(&cache->node); + mutex_unlock(&ggtt->vm.mutex); + } else { + i915_vma_unpin((struct i915_vma *)cache->node.mm); + } + } + + cache->vaddr = 0; + cache->page = -1; } -static int reloc_gpu_flush(struct reloc_cache *cache) +static void *reloc_kmap(struct drm_i915_gem_object *obj, + struct reloc_cache *cache, + unsigned long pageno) { - struct i915_request *rq; - int err; + void *vaddr; + struct page *page; - rq = fetch_and_zero(&cache->rq); - if (!rq) - return 0; + if (cache->vaddr) { + kunmap_atomic(unmask_page(cache->vaddr)); + } else { + unsigned int flushes; + int err; - if (cache->rq_vma) { - struct drm_i915_gem_object *obj = cache->rq_vma->obj; + err = i915_gem_object_prepare_write(obj, &flushes); + if (err) + return ERR_PTR(err); - GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32)); - cache->rq_cmd[cache->rq_size++] = MI_BATCH_BUFFER_END; + BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS); + BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK); - __i915_gem_object_flush_map(obj, - 0, sizeof(u32) * cache->rq_size); - i915_gem_object_unpin_map(obj); + cache->vaddr = flushes | KMAP; + cache->node.mm = (void *)obj; + if (flushes) + mb(); } - err = 0; - if (rq->engine->emit_init_breadcrumb) - err = rq->engine->emit_init_breadcrumb(rq); - if (!err) - err = rq->engine->emit_bb_start(rq, - rq->batch->node.start, - PAGE_SIZE, - 
reloc_bb_flags(cache)); - if (err) - i915_request_set_error_once(rq, err); + page = i915_gem_object_get_page(obj, pageno); + if (!obj->mm.dirty) + set_page_dirty(page); - intel_gt_chipset_flush(rq->engine->gt); - i915_request_add(rq); + vaddr = kmap_atomic(page); + cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr; + cache->page = pageno; - return err; + return vaddr; +} + +static void *reloc_iomap(struct drm_i915_gem_object *obj, + struct i915_execbuffer *eb, + unsigned long page) +{ + struct reloc_cache *cache = &eb->reloc_cache; + struct i915_ggtt *ggtt = cache_to_ggtt(cache); + unsigned long offset; + void *vaddr; + + if (cache->vaddr) { + intel_gt_flush_ggtt_writes(ggtt->vm.gt); + io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr)); + } else { + struct i915_vma *vma; + int err; + + if (i915_gem_object_is_tiled(obj)) + return ERR_PTR(-EINVAL); + + if (use_cpu_reloc(cache, obj)) + return NULL; + + err = i915_gem_object_set_to_gtt_domain(obj, true); + if (err) + return ERR_PTR(err); + + vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0, + PIN_MAPPABLE | + PIN_NONBLOCK /* NOWARN */ | + PIN_NOEVICT); + if (vma == ERR_PTR(-EDEADLK)) + return vma; + + if (IS_ERR(vma)) { + memset(&cache->node, 0, sizeof(cache->node)); + mutex_lock(&ggtt->vm.mutex); + err = drm_mm_insert_node_in_range + (&ggtt->vm.mm, &cache->node, + PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, + 0, ggtt->mappable_end, + DRM_MM_INSERT_LOW); + mutex_unlock(&ggtt->vm.mutex); + if (err) /* no inactive aperture space, use cpu reloc */ + return NULL; + } else { + cache->node.start = vma->node.start; + cache->node.mm = (void *)vma; + } + } + + offset = cache->node.start; + if (drm_mm_node_allocated(&cache->node)) { + ggtt->vm.insert_page(&ggtt->vm, + i915_gem_object_get_dma_address(obj, page), + offset, I915_CACHE_NONE, 0); + } else { + offset += page << PAGE_SHIFT; + } + + vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap, + offset); + cache->page = page; + cache->vaddr = (unsigned long)vaddr; + + return vaddr; +} + +static void *reloc_vaddr(struct drm_i915_gem_object *obj, + struct i915_execbuffer *eb, + unsigned long page) +{ + struct reloc_cache *cache = &eb->reloc_cache; + void *vaddr; + + if (cache->page == page) { + vaddr = unmask_page(cache->vaddr); + } else { + vaddr = NULL; + if ((cache->vaddr & KMAP) == 0) + vaddr = reloc_iomap(obj, eb, page); + if (!vaddr) + vaddr = reloc_kmap(obj, cache, page); + } + + return vaddr; +} + +static void clflush_write32(u32 *addr, u32 value, unsigned int flushes) +{ + if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) { + if (flushes & CLFLUSH_BEFORE) { + clflushopt(addr); + mb(); + } + + *addr = value; + + /* + * Writes to the same cacheline are serialised by the CPU + * (including clflush). On the write path, we only require + * that it hits memory in an orderly fashion and place + * mb barriers at the start and end of the relocation phase + * to ensure ordering of clflush wrt to the system. 
+ */ + if (flushes & CLFLUSH_AFTER) + clflushopt(addr); + } else + *addr = value; } static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma) @@ -1054,7 +1247,7 @@ static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma) struct drm_i915_gem_object *obj = vma->obj; int err; - i915_vma_lock(vma); + assert_vma_held(vma); if (obj->cache_dirty & ~obj->cache_coherent) i915_gem_clflush_object(obj, 0); @@ -1064,25 +1257,31 @@ static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma) if (err == 0) err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - i915_vma_unlock(vma); - return err; } static int __reloc_gpu_alloc(struct i915_execbuffer *eb, struct intel_engine_cs *engine, + struct i915_vma *vma, unsigned int len) { struct reloc_cache *cache = &eb->reloc_cache; - struct intel_gt_buffer_pool_node *pool; + struct intel_gt_buffer_pool_node *pool = eb->reloc_pool; struct i915_request *rq; struct i915_vma *batch; u32 *cmd; int err; - pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE); - if (IS_ERR(pool)) - return PTR_ERR(pool); + if (!pool) { + pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE); + if (IS_ERR(pool)) + return PTR_ERR(pool); + } + eb->reloc_pool = NULL; + + err = i915_gem_object_lock(pool->obj, &eb->ww); + if (err) + goto err_pool; cmd = i915_gem_object_pin_map(pool->obj, cache->has_llc ? @@ -1090,35 +1289,42 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, I915_MAP_FORCE_WC); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); - goto out_pool; + goto err_pool; } - batch = i915_vma_instance(pool->obj, eb->context->vm, NULL); + batch = i915_vma_instance(pool->obj, vma->vm, NULL); if (IS_ERR(batch)) { err = PTR_ERR(batch); goto err_unmap; } - err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK); + err = i915_vma_pin_ww(batch, &eb->ww, 0, 0, PIN_USER | PIN_NONBLOCK); if (err) goto err_unmap; if (engine == eb->context->engine) { rq = i915_request_create(eb->context); } else { - struct intel_context *ce; + struct intel_context *ce = eb->reloc_context; - ce = intel_context_create(engine); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto err_unpin; + if (!ce) { + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto err_unpin; + } + + i915_vm_put(ce->vm); + ce->vm = i915_vm_get(eb->context->vm); + eb->reloc_context = ce; } - i915_vm_put(ce->vm); - ce->vm = i915_vm_get(eb->context->vm); + err = intel_context_pin_ww(ce, &eb->ww); + if (err) + goto err_unpin; - rq = intel_context_create_request(ce); - intel_context_put(ce); + rq = i915_request_create(ce); + intel_context_unpin(ce); } if (IS_ERR(rq)) { err = PTR_ERR(rq); @@ -1129,11 +1335,20 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, if (err) goto err_request; - i915_vma_lock(batch); + err = reloc_move_to_gpu(rq, vma); + if (err) + goto err_request; + + err = eb->engine->emit_bb_start(rq, + batch->node.start, PAGE_SIZE, + cache->gen > 5 ? 
0 : I915_DISPATCH_SECURE); + if (err) + goto skip_request; + + assert_vma_held(batch); err = i915_request_await_object(rq, batch->obj, false); if (err == 0) err = i915_vma_move_to_active(batch, rq, 0); - i915_vma_unlock(batch); if (err) goto skip_request; @@ -1143,10 +1358,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, cache->rq = rq; cache->rq_cmd = cmd; cache->rq_size = 0; - cache->rq_vma = batch; + cache->pool = pool; /* Return with batch mapping (cmd) still pinned */ - goto out_pool; + return 0; skip_request: i915_request_set_error_once(rq, err); @@ -1156,8 +1371,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, i915_vma_unpin(batch); err_unmap: i915_gem_object_unpin_map(pool->obj); -out_pool: - intel_gt_buffer_pool_put(pool); +err_pool: + eb->reloc_pool = pool; return err; } @@ -1172,9 +1387,12 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, { struct reloc_cache *cache = &eb->reloc_cache; u32 *cmd; - int err; + + if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1)) + reloc_gpu_flush(eb, cache); if (unlikely(!cache->rq)) { + int err; struct intel_engine_cs *engine = eb->engine; if (!reloc_can_use_engine(engine)) { @@ -1183,37 +1401,28 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, return ERR_PTR(-ENODEV); } - err = __reloc_gpu_alloc(eb, engine, len); + err = __reloc_gpu_alloc(eb, engine, vma, len); if (unlikely(err)) return ERR_PTR(err); } - if (vma != cache->target) { - err = reloc_move_to_gpu(cache->rq, vma); - if (unlikely(err)) { - i915_request_set_error_once(cache->rq, err); - return ERR_PTR(err); - } - - cache->target = vma; - } - - if (unlikely(cache->rq_size + len > - PAGE_SIZE / sizeof(u32) - RELOC_TAIL)) { - err = reloc_gpu_chain(cache); - if (unlikely(err)) { - i915_request_set_error_once(cache->rq, err); - return ERR_PTR(err); - } - } - - GEM_BUG_ON(cache->rq_size + len >= PAGE_SIZE / sizeof(u32)); cmd = cache->rq_cmd + cache->rq_size; cache->rq_size += len; return cmd; } +static inline bool use_reloc_gpu(struct i915_vma *vma) +{ + if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) + return true; + + if (DBG_FORCE_RELOC) + return false; + + return !dma_resv_test_signaled_rcu(vma->resv, true); +} + static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset) { struct page *page; @@ -1229,9 +1438,9 @@ static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset) } static int __reloc_entry_gpu(struct i915_execbuffer *eb, - struct i915_vma *vma, - u64 offset, - u64 target_addr) + struct i915_vma *vma, + u64 offset, + u64 target_addr) { const unsigned int gen = eb->reloc_cache.gen; unsigned int len; @@ -1246,8 +1455,10 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb, len = 3; batch = reloc_gpu(eb, vma, len); - if (IS_ERR(batch)) - return PTR_ERR(batch); + if (batch == ERR_PTR(-EDEADLK)) + return -EDEADLK; + else if (IS_ERR(batch)) + return false; addr = gen8_canonical_addr(vma->node.start + offset); if (gen >= 8) { @@ -1296,21 +1507,58 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb, *batch++ = target_addr; } - return 0; + return true; +} + +static int reloc_entry_gpu(struct i915_execbuffer *eb, + struct i915_vma *vma, + u64 offset, + u64 target_addr) +{ + if (eb->reloc_cache.vaddr) + return false; + + if (!use_reloc_gpu(vma)) + return false; + + return __reloc_entry_gpu(eb, vma, offset, target_addr); } static u64 -relocate_entry(struct i915_execbuffer *eb, - struct i915_vma *vma, +relocate_entry(struct i915_vma *vma, const struct drm_i915_gem_relocation_entry *reloc, + struct i915_execbuffer *eb, const struct 
i915_vma *target) { u64 target_addr = relocation_target(reloc, target); - int err; - - err = __reloc_entry_gpu(eb, vma, reloc->offset, target_addr); - if (err) - return err; + u64 offset = reloc->offset; + int reloc_gpu = reloc_entry_gpu(eb, vma, offset, target_addr); + + if (reloc_gpu < 0) + return reloc_gpu; + + if (!reloc_gpu) { + bool wide = eb->reloc_cache.use_64bit_reloc; + void *vaddr; + +repeat: + vaddr = reloc_vaddr(vma->obj, eb, + offset >> PAGE_SHIFT); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32))); + clflush_write32(vaddr + offset_in_page(offset), + lower_32_bits(target_addr), + eb->reloc_cache.vaddr); + + if (wide) { + offset += sizeof(u32); + target_addr >>= 32; + wide = false; + goto repeat; + } + } return target->node.start | UPDATE; } @@ -1375,7 +1623,8 @@ eb_relocate_entry(struct i915_execbuffer *eb, * If the relocation already has the right value in it, no * more work needs to be done. */ - if (gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset) + if (!DBG_FORCE_RELOC && + gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset) return 0; /* Check that the relocation address is valid... */ @@ -1407,7 +1656,7 @@ eb_relocate_entry(struct i915_execbuffer *eb, ev->flags &= ~EXEC_OBJECT_ASYNC; /* and update the user's relocation entry */ - return relocate_entry(eb, ev->vma, reloc, target->vma); + return relocate_entry(ev->vma, reloc, eb, target->vma); } static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) @@ -1444,9 +1693,13 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) * we would try to acquire the struct mutex again. Obviously * this is bad and so lockdep complains vehemently. */ - copied = __copy_from_user(r, urelocs, count * sizeof(r[0])); - if (unlikely(copied)) - return -EFAULT; + pagefault_disable(); + copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0])); + pagefault_enable(); + if (unlikely(copied)) { + remain = -EFAULT; + goto out; + } remain -= count; do { @@ -1454,7 +1707,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) if (likely(offset == 0)) { } else if ((s64)offset < 0) { - return (int)offset; + remain = (int)offset; + goto out; } else { /* * Note that reporting an error now @@ -1484,74 +1738,401 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) } while (r++, --count); urelocs += ARRAY_SIZE(stack); } while (remain); +out: + reloc_cache_reset(&eb->reloc_cache, eb); + return remain; +} - return 0; +static int +eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev) +{ + const struct drm_i915_gem_exec_object2 *entry = ev->exec; + struct drm_i915_gem_relocation_entry *relocs = + u64_to_ptr(typeof(*relocs), entry->relocs_ptr); + unsigned int i; + int err; + + for (i = 0; i < entry->relocation_count; i++) { + u64 offset = eb_relocate_entry(eb, ev, &relocs[i]); + + if ((s64)offset < 0) { + err = (int)offset; + goto err; + } + } + err = 0; +err: + reloc_cache_reset(&eb->reloc_cache, eb); + return err; +} + +static int check_relocations(const struct drm_i915_gem_exec_object2 *entry) +{ + const char __user *addr, *end; + unsigned long size; + char __maybe_unused c; + + size = entry->relocation_count; + if (size == 0) + return 0; + + if (size > N_RELOC(ULONG_MAX)) + return -EINVAL; + + addr = u64_to_user_ptr(entry->relocs_ptr); + size *= sizeof(struct drm_i915_gem_relocation_entry); + if (!access_ok(addr, size)) + return -EFAULT; + + end = addr + size; + 
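+	/*
+	 * Touching one byte per page both checks that the user relocation
+	 * array is readable and prefaults it, so that the pagefault-disabled
+	 * __copy_from_user_inatomic() fast path is unlikely to trip over an
+	 * unpopulated page; the trailing __get_user() below covers an array
+	 * that ends partway into a page.
+	 */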
+	for (; addr < end; addr += PAGE_SIZE) {
+		int err = __get_user(c, addr);
+		if (err)
+			return err;
+	}
+	return __get_user(c, end - 1);
}

-static int eb_relocate(struct i915_execbuffer *eb)
+static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
+	struct drm_i915_gem_relocation_entry *relocs;
+	const unsigned int count = eb->buffer_count;
+	unsigned int i;
	int err;

-	err = eb_lookup_vmas(eb);
-	if (err)
-		return err;
+	for (i = 0; i < count; i++) {
+		const unsigned int nreloc = eb->exec[i].relocation_count;
+		struct drm_i915_gem_relocation_entry __user *urelocs;
+		unsigned long size;
+		unsigned long copied;
+
+		if (nreloc == 0)
+			continue;

-	if (!list_empty(&eb->unbound)) {
-		err = eb_reserve(eb);
+		err = check_relocations(&eb->exec[i]);
+		if (err)
+			goto err;
+
+		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
+		size = nreloc * sizeof(*relocs);
+
+		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
+		if (!relocs) {
+			err = -ENOMEM;
+			goto err;
+		}
+
+		/* copy_from_user is limited to < 4GiB */
+		copied = 0;
+		do {
+			unsigned int len =
+				min_t(u64, BIT_ULL(31), size - copied);
+
+			if (__copy_from_user((char *)relocs + copied,
+					     (char __user *)urelocs + copied,
+					     len))
+				goto end;
+
+			copied += len;
+		} while (copied < size);
+
+		/*
+		 * As we do not update the known relocation offsets after
+		 * relocating (due to the complexities in lock handling),
+		 * we need to mark them as invalid now so that we force the
+		 * relocation processing next time. Just in case the target
+		 * object is evicted and then rebound into its old
+		 * presumed_offset before the next execbuffer - if that
+		 * happened we would make the mistake of assuming that the
+		 * relocations were valid.
+		 */
+		if (!user_access_begin(urelocs, size))
+			goto end;
+
+		for (copied = 0; copied < nreloc; copied++)
+			unsafe_put_user(-1,
+					&urelocs[copied].presumed_offset,
+					end_user);
+		user_access_end();
+
+		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
+	}
+
+	return 0;
+
+end_user:
+	user_access_end();
+end:
+	kvfree(relocs);
+	err = -EFAULT;
+err:
+	while (i--) {
+		relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
+		if (eb->exec[i].relocation_count)
+			kvfree(relocs);
+	}
+	return err;
+}
+
+static int eb_prefault_relocations(const struct i915_execbuffer *eb)
+{
+	const unsigned int count = eb->buffer_count;
+	unsigned int i;
+
+	for (i = 0; i < count; i++) {
+		int err;
+
+		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}

-	/* The objects are in their final locations, apply the relocations. */
-	if (eb->args->flags & __EXEC_HAS_RELOC) {
-		struct eb_vma *ev;
-		int flush;
+	return 0;
+}

-		list_for_each_entry(ev, &eb->relocs, reloc_link) {
+static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
+					   struct i915_request *rq)
+{
+	bool have_copy = false;
+	struct eb_vma *ev;
+	int err = 0;
+
+repeat:
+	if (signal_pending(current)) {
+		err = -ERESTARTSYS;
+		goto out;
+	}
+
+	/* We may process another execbuffer during the unlock... */
+	eb_release_vmas(eb, false);
+	i915_gem_ww_ctx_fini(&eb->ww);
+
+	if (rq) {
+		/* nonblocking is always false */
+		if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
+				      MAX_SCHEDULE_TIMEOUT) < 0) {
+			i915_request_put(rq);
+			rq = NULL;
+
+			err = -EINTR;
+			goto err_relock;
+		}
+
+		i915_request_put(rq);
+		rq = NULL;
+	}
+
+	/*
+	 * We take 3 passes through the slowpath.
+	 *
+	 * 1 - we try to just prefault all the user relocation entries and
+	 * then attempt to reuse the atomic pagefault disabled fast path again.
+	 *
+	 * 2 - we copy the user entries to a local buffer here outside of the
+	 * lock and allow ourselves to wait upon any rendering before
+	 * relocations
+	 *
+	 * 3 - we already have a local copy of the relocation entries, but
+	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
+	 */
+	if (!err) {
+		err = eb_prefault_relocations(eb);
+	} else if (!have_copy) {
+		err = eb_copy_relocations(eb);
+		have_copy = err == 0;
+	} else {
+		cond_resched();
+		err = 0;
+	}
+
+	if (!err)
+		flush_workqueue(eb->i915->mm.userptr_wq);
+
+err_relock:
+	i915_gem_ww_ctx_init(&eb->ww, true);
+	if (err)
+		goto out;
+
+	/* reacquire the objects */
+repeat_validate:
+	rq = eb_pin_engine(eb, false);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		rq = NULL;
+		goto err;
+	}
+
+	/* We didn't throttle, should be NULL */
+	GEM_WARN_ON(rq);
+
+	err = eb_validate_vmas(eb);
+	if (err)
+		goto err;
+
+	GEM_BUG_ON(!eb->batch);
+
+	list_for_each_entry(ev, &eb->relocs, reloc_link) {
+		if (!have_copy) {
+			pagefault_disable();
			err = eb_relocate_vma(eb, ev);
+			pagefault_enable();
+			if (err)
+				break;
+		} else {
+			err = eb_relocate_vma_slow(eb, ev);
			if (err)
				break;
		}
+	}
+
+	if (err == -EDEADLK)
+		goto err;
+
+	if (err && !have_copy)
+		goto repeat;
+
+	if (err)
+		goto err;
+
+	/* as last step, parse the command buffer */
+	err = eb_parse(eb);
+	if (err)
+		goto err;
+
+	/*
+	 * Leave the user relocations as are, this is the painfully slow path,
+	 * and we want to avoid the complication of dropping the lock whilst
+	 * having buffers reserved in the aperture and so causing spurious
+	 * ENOSPC for random operations.
+	 */

-		flush = reloc_gpu_flush(&eb->reloc_cache);
+err:
+	if (err == -EDEADLK) {
+		eb_release_vmas(eb, false);
+		err = i915_gem_ww_ctx_backoff(&eb->ww);
		if (!err)
-			err = flush;
+			goto repeat_validate;
	}
+	if (err == -EAGAIN)
+		goto repeat;
+
+out:
+	if (have_copy) {
+		const unsigned int count = eb->buffer_count;
+		unsigned int i;
+
+		for (i = 0; i < count; i++) {
+			const struct drm_i915_gem_exec_object2 *entry =
+				&eb->exec[i];
+			struct drm_i915_gem_relocation_entry *relocs;
+
+			if (!entry->relocation_count)
+				continue;
+
+			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
+			kvfree(relocs);
+		}
+	}
+
+	if (rq)
+		i915_request_put(rq);
+
	return err;
}

-static int eb_move_to_gpu(struct i915_execbuffer *eb)
+static int eb_relocate_parse(struct i915_execbuffer *eb)
{
-	const unsigned int count = eb->buffer_count;
-	struct ww_acquire_ctx acquire;
-	unsigned int i;
-	int err = 0;
+	int err;
+	struct i915_request *rq = NULL;
+	bool throttle = true;

-	ww_acquire_init(&acquire, &reservation_ww_class);
+retry:
+	rq = eb_pin_engine(eb, throttle);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		rq = NULL;
+		if (err != -EDEADLK)
+			return err;

-	for (i = 0; i < count; i++) {
-		struct eb_vma *ev = &eb->vma[i];
-		struct i915_vma *vma = ev->vma;
+		goto err;
+	}
+
+	if (rq) {
+		bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;

-		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
-		if (err == -EDEADLK) {
-			GEM_BUG_ON(i == 0);
-			do {
-				int j = i - 1;
+		/* Need to drop all locks now for throttling, take slowpath */
+		err = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, 0);
+		if (err == -ETIME) {
+			if (nonblock) {
+				err = -EWOULDBLOCK;
+				i915_request_put(rq);
+				goto err;
+			}
+			goto slow;
+		}
+		i915_request_put(rq);
+		rq = NULL;
+	}

-				ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
+	/* only throttle once, even if we didn't need to throttle */
+	throttle = false;

-				swap(eb->vma[i], eb->vma[j]);
-			} while (--i);
+	err
= eb_validate_vmas(eb); + if (err == -EAGAIN) + goto slow; + else if (err) + goto err; - err = ww_mutex_lock_slow_interruptible(&vma->resv->lock, - &acquire); + /* The objects are in their final locations, apply the relocations. */ + if (eb->args->flags & __EXEC_HAS_RELOC) { + struct eb_vma *ev; + + list_for_each_entry(ev, &eb->relocs, reloc_link) { + err = eb_relocate_vma(eb, ev); + if (err) + break; } - if (err) - break; + + if (err == -EDEADLK) + goto err; + else if (err) + goto slow; + } + + if (!err) + err = eb_parse(eb); + +err: + if (err == -EDEADLK) { + eb_release_vmas(eb, false); + err = i915_gem_ww_ctx_backoff(&eb->ww); + if (!err) + goto retry; } - ww_acquire_done(&acquire); + + return err; + +slow: + err = eb_relocate_parse_slow(eb, rq); + if (err) + /* + * If the user expects the execobject.offset and + * reloc.presumed_offset to be an exact match, + * as for using NO_RELOC, then we cannot update + * the execobject.offset until we have completed + * relocation. + */ + eb->args->flags &= ~__EXEC_HAS_RELOC; + + return err; +} + +static int eb_move_to_gpu(struct i915_execbuffer *eb) +{ + const unsigned int count = eb->buffer_count; + unsigned int i = count; + int err = 0; while (i--) { struct eb_vma *ev = &eb->vma[i]; @@ -1595,14 +2176,8 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) } if (err == 0) - err = i915_vma_move_to_active(vma, eb->request, flags); - - i915_vma_unlock(vma); - eb_unreserve_vma(ev); + err = i915_vma_move_to_active(vma, eb->request, flags); } - ww_acquire_fini(&acquire); - - eb_vma_array_put(fetch_and_zero(&eb->array)); if (unlikely(err)) goto err_skip; @@ -1622,7 +2197,8 @@ static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) return -EINVAL; /* Kernel clipping was a DRI1 misfeature */ - if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) { + if (!(exec->flags & (I915_EXEC_FENCE_ARRAY | + I915_EXEC_USE_EXTENSIONS))) { if (exec->num_cliprects || exec->cliprects_ptr) return -EINVAL; } @@ -1666,7 +2242,8 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq) } static struct i915_vma * -shadow_batch_pin(struct drm_i915_gem_object *obj, +shadow_batch_pin(struct i915_execbuffer *eb, + struct drm_i915_gem_object *obj, struct i915_address_space *vm, unsigned int flags) { @@ -1677,7 +2254,7 @@ shadow_batch_pin(struct drm_i915_gem_object *obj, if (IS_ERR(vma)) return vma; - err = i915_vma_pin(vma, 0, 0, flags); + err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags); if (err) return ERR_PTR(err); @@ -1729,7 +2306,7 @@ __parser_mark_active(struct i915_vma *vma, { struct intel_gt_buffer_pool_node *node = vma->private; - return i915_active_ref(&node->active, tl, fence); + return i915_active_ref(&node->active, tl->fence_context, fence); } static int @@ -1793,36 +2370,26 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb, if (err) goto err_commit; - err = dma_resv_lock_interruptible(pw->batch->resv, NULL); - if (err) - goto err_commit; - err = dma_resv_reserve_shared(pw->batch->resv, 1); if (err) - goto err_commit_unlock; + goto err_commit; /* Wait for all writes (and relocs) into the batch to complete */ err = i915_sw_fence_await_reservation(&pw->base.chain, pw->batch->resv, NULL, false, 0, I915_FENCE_GFP); if (err < 0) - goto err_commit_unlock; + goto err_commit; /* Keep the batch alive and unwritten as we parse */ dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma); - dma_resv_unlock(pw->batch->resv); - /* Force execution to wait for completion of the parser */ - dma_resv_lock(shadow->resv, NULL); 
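	/*
	 * The parser work is published twice with different semantics: as a
	 * shared fence on the user batch, keeping it alive and blocking new
	 * GPU writers while it is parsed, and as the exclusive fence on the
	 * shadow, making any execution of the shadow wait for the parser.
	 */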
dma_resv_add_excl_fence(shadow->resv, &pw->base.dma); - dma_resv_unlock(shadow->resv); dma_fence_work_commit_imm(&pw->base); return 0; -err_commit_unlock: - dma_resv_unlock(pw->batch->resv); err_commit: i915_sw_fence_set_error_once(&pw->base.chain, err); dma_fence_work_commit_imm(&pw->base); @@ -1837,16 +2404,33 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb, return err; } +static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma) +{ + /* + * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure + * batch" bit. Hence we need to pin secure batches into the global gtt. + * hsw should have this fixed, but bdw mucks it up again. */ + if (eb->batch_flags & I915_DISPATCH_SECURE) + return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, 0); + + return NULL; +} + static int eb_parse(struct i915_execbuffer *eb) { struct drm_i915_private *i915 = eb->i915; - struct intel_gt_buffer_pool_node *pool; - struct i915_vma *shadow, *trampoline; + struct intel_gt_buffer_pool_node *pool = eb->batch_pool; + struct i915_vma *shadow, *trampoline, *batch; unsigned int len; int err; - if (!eb_use_cmdparser(eb)) - return 0; + if (!eb_use_cmdparser(eb)) { + batch = eb_dispatch_secure(eb, eb->batch->vma); + if (IS_ERR(batch)) + return PTR_ERR(batch); + + goto secure_batch; + } len = eb->batch_len; if (!CMDPARSER_USES_GGTT(eb->i915)) { @@ -1863,11 +2447,18 @@ static int eb_parse(struct i915_execbuffer *eb) len += I915_CMD_PARSER_TRAMPOLINE_SIZE; } - pool = intel_gt_get_buffer_pool(eb->engine->gt, len); - if (IS_ERR(pool)) - return PTR_ERR(pool); + if (!pool) { + pool = intel_gt_get_buffer_pool(eb->engine->gt, len); + if (IS_ERR(pool)) + return PTR_ERR(pool); + eb->batch_pool = pool; + } + + err = i915_gem_object_lock(pool->obj, &eb->ww); + if (err) + goto err; - shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER); + shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER); if (IS_ERR(shadow)) { err = PTR_ERR(shadow); goto err; @@ -1879,7 +2470,7 @@ static int eb_parse(struct i915_execbuffer *eb) if (CMDPARSER_USES_GGTT(eb->i915)) { trampoline = shadow; - shadow = shadow_batch_pin(pool->obj, + shadow = shadow_batch_pin(eb, pool->obj, &eb->engine->gt->ggtt->vm, PIN_GLOBAL); if (IS_ERR(shadow)) { @@ -1892,42 +2483,43 @@ static int eb_parse(struct i915_execbuffer *eb) eb->batch_flags |= I915_DISPATCH_SECURE; } + batch = eb_dispatch_secure(eb, shadow); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto err_trampoline; + } + err = eb_parse_pipeline(eb, shadow, trampoline); if (err) - goto err_trampoline; + goto err_unpin_batch; - eb->vma[eb->buffer_count].vma = i915_vma_get(shadow); - eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN; eb->batch = &eb->vma[eb->buffer_count++]; - eb->vma[eb->buffer_count].vma = NULL; + eb->batch->vma = i915_vma_get(shadow); + eb->batch->flags = __EXEC_OBJECT_HAS_PIN; eb->trampoline = trampoline; eb->batch_start_offset = 0; +secure_batch: + if (batch) { + eb->batch = &eb->vma[eb->buffer_count++]; + eb->batch->flags = __EXEC_OBJECT_HAS_PIN; + eb->batch->vma = i915_vma_get(batch); + } return 0; +err_unpin_batch: + if (batch) + i915_vma_unpin(batch); err_trampoline: if (trampoline) i915_vma_unpin(trampoline); err_shadow: i915_vma_unpin(shadow); err: - intel_gt_buffer_pool_put(pool); return err; } -static void -add_to_client(struct i915_request *rq, struct drm_file *file) -{ - struct drm_i915_file_private *file_priv = file->driver_priv; - - rq->file_priv = file_priv; - - 
spin_lock(&file_priv->mm.lock); - list_add_tail(&rq->client_link, &file_priv->mm.request_list); - spin_unlock(&file_priv->mm.lock); -} - static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) { int err; @@ -2009,7 +2601,7 @@ static const enum intel_engine_id user_ring_map[] = { [I915_EXEC_VEBOX] = VECS0 }; -static struct i915_request *eb_throttle(struct intel_context *ce) +static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel_context *ce) { struct intel_ring *ring = ce->ring; struct intel_timeline *tl = ce->timeline; @@ -2043,31 +2635,26 @@ static struct i915_request *eb_throttle(struct intel_context *ce) return i915_request_get(rq); } -static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce) +static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throttle) { + struct intel_context *ce = eb->context; struct intel_timeline *tl; - struct i915_request *rq; + struct i915_request *rq = NULL; int err; - /* - * ABI: Before userspace accesses the GPU (e.g. execbuffer), report - * EIO if the GPU is already wedged. - */ - err = intel_gt_terminally_wedged(ce->engine->gt); - if (err) - return err; + GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED); if (unlikely(intel_context_is_banned(ce))) - return -EIO; + return ERR_PTR(-EIO); /* * Pinning the contexts may generate requests in order to acquire * GGTT space, so do this first before we reserve a seqno for * ourselves. */ - err = intel_context_pin(ce); + err = intel_context_pin_ww(ce, &eb->ww); if (err) - return err; + return ERR_PTR(err); /* * Take a local wakeref for preparing to dispatch the execbuf as @@ -2079,45 +2666,17 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce) */ tl = intel_context_timeline_lock(ce); if (IS_ERR(tl)) { - err = PTR_ERR(tl); - goto err_unpin; + intel_context_unpin(ce); + return ERR_CAST(tl); } intel_context_enter(ce); - rq = eb_throttle(ce); - + if (throttle) + rq = eb_throttle(eb, ce); intel_context_timeline_unlock(tl); - if (rq) { - bool nonblock = eb->file->filp->f_flags & O_NONBLOCK; - long timeout; - - timeout = MAX_SCHEDULE_TIMEOUT; - if (nonblock) - timeout = 0; - - timeout = i915_request_wait(rq, - I915_WAIT_INTERRUPTIBLE, - timeout); - i915_request_put(rq); - - if (timeout < 0) { - err = nonblock ? 
-EWOULDBLOCK : timeout; - goto err_exit; - } - } - - eb->engine = ce->engine; - eb->context = ce; - return 0; - -err_exit: - mutex_lock(&tl->mutex); - intel_context_exit(ce); - intel_context_timeline_unlock(tl); -err_unpin: - intel_context_unpin(ce); - return err; + eb->args->flags |= __EXEC_ENGINE_PINNED; + return rq; } static void eb_unpin_engine(struct i915_execbuffer *eb) @@ -2125,6 +2684,11 @@ static void eb_unpin_engine(struct i915_execbuffer *eb) struct intel_context *ce = eb->context; struct intel_timeline *tl = ce->timeline; + if (!(eb->args->flags & __EXEC_ENGINE_PINNED)) + return; + + eb->args->flags &= ~__EXEC_ENGINE_PINNED; + mutex_lock(&tl->mutex); intel_context_exit(ce); mutex_unlock(&tl->mutex); @@ -2133,11 +2697,10 @@ static void eb_unpin_engine(struct i915_execbuffer *eb) } static unsigned int -eb_select_legacy_ring(struct i915_execbuffer *eb, - struct drm_file *file, - struct drm_i915_gem_execbuffer2 *args) +eb_select_legacy_ring(struct i915_execbuffer *eb) { struct drm_i915_private *i915 = eb->i915; + struct drm_i915_gem_execbuffer2 *args = eb->args; unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK; if (user_ring_id != I915_EXEC_BSD && @@ -2152,7 +2715,7 @@ eb_select_legacy_ring(struct i915_execbuffer *eb, unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; if (bsd_idx == I915_EXEC_BSD_DEFAULT) { - bsd_idx = gen8_dispatch_bsd_engine(i915, file); + bsd_idx = gen8_dispatch_bsd_engine(i915, eb->file); } else if (bsd_idx >= I915_EXEC_BSD_RING1 && bsd_idx <= I915_EXEC_BSD_RING2) { bsd_idx >>= I915_EXEC_BSD_SHIFT; @@ -2177,131 +2740,297 @@ eb_select_legacy_ring(struct i915_execbuffer *eb, } static int -eb_pin_engine(struct i915_execbuffer *eb, - struct drm_file *file, - struct drm_i915_gem_execbuffer2 *args) +eb_select_engine(struct i915_execbuffer *eb) { struct intel_context *ce; unsigned int idx; int err; if (i915_gem_context_user_engines(eb->gem_context)) - idx = args->flags & I915_EXEC_RING_MASK; + idx = eb->args->flags & I915_EXEC_RING_MASK; else - idx = eb_select_legacy_ring(eb, file, args); + idx = eb_select_legacy_ring(eb); ce = i915_gem_context_get_engine(eb->gem_context, idx); if (IS_ERR(ce)) return PTR_ERR(ce); - err = __eb_pin_engine(eb, ce); - intel_context_put(ce); + intel_gt_pm_get(ce->engine->gt); + + if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { + err = intel_context_alloc_state(ce); + if (err) + goto err; + } + + /* + * ABI: Before userspace accesses the GPU (e.g. execbuffer), report + * EIO if the GPU is already wedged. + */ + err = intel_gt_terminally_wedged(ce->engine->gt); + if (err) + goto err; + + eb->context = ce; + eb->engine = ce->engine; + /* + * Make sure engine pool stays alive even if we call intel_context_put + * during ww handling. The pool is destroyed when last pm reference + * is dropped, which breaks our -EDEADLK handling. 
+ */ + return err; + +err: + intel_gt_pm_put(ce->engine->gt); + intel_context_put(ce); return err; } static void -__free_fence_array(struct drm_syncobj **fences, unsigned int n) +eb_put_engine(struct i915_execbuffer *eb) +{ + intel_gt_pm_put(eb->engine->gt); + intel_context_put(eb->context); +} + +static void +__free_fence_array(struct eb_fence *fences, unsigned int n) { - while (n--) - drm_syncobj_put(ptr_mask_bits(fences[n], 2)); + while (n--) { + drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2)); + dma_fence_put(fences[n].dma_fence); + kfree(fences[n].chain_fence); + } kvfree(fences); } -static struct drm_syncobj ** -get_fence_array(struct drm_i915_gem_execbuffer2 *args, - struct drm_file *file) +static int +add_timeline_fence_array(struct i915_execbuffer *eb, + const struct drm_i915_gem_execbuffer_ext_timeline_fences *timeline_fences) { - const unsigned long nfences = args->num_cliprects; - struct drm_i915_gem_exec_fence __user *user; - struct drm_syncobj **fences; - unsigned long n; - int err; + struct drm_i915_gem_exec_fence __user *user_fences; + u64 __user *user_values; + struct eb_fence *f; + u64 nfences; + int err = 0; - if (!(args->flags & I915_EXEC_FENCE_ARRAY)) - return NULL; + nfences = timeline_fences->fence_count; + if (!nfences) + return 0; /* Check multiplication overflow for access_ok() and kvmalloc_array() */ BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long)); if (nfences > min_t(unsigned long, - ULONG_MAX / sizeof(*user), - SIZE_MAX / sizeof(*fences))) - return ERR_PTR(-EINVAL); + ULONG_MAX / sizeof(*user_fences), + SIZE_MAX / sizeof(*f)) - eb->num_fences) + return -EINVAL; - user = u64_to_user_ptr(args->cliprects_ptr); - if (!access_ok(user, nfences * sizeof(*user))) - return ERR_PTR(-EFAULT); + user_fences = u64_to_user_ptr(timeline_fences->handles_ptr); + if (!access_ok(user_fences, nfences * sizeof(*user_fences))) + return -EFAULT; + + user_values = u64_to_user_ptr(timeline_fences->values_ptr); + if (!access_ok(user_values, nfences * sizeof(*user_values))) + return -EFAULT; + + f = krealloc(eb->fences, + (eb->num_fences + nfences) * sizeof(*f), + __GFP_NOWARN | GFP_KERNEL); + if (!f) + return -ENOMEM; + + eb->fences = f; + f += eb->num_fences; - fences = kvmalloc_array(nfences, sizeof(*fences), - __GFP_NOWARN | GFP_KERNEL); - if (!fences) - return ERR_PTR(-ENOMEM); + BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) & + ~__I915_EXEC_FENCE_UNKNOWN_FLAGS); - for (n = 0; n < nfences; n++) { - struct drm_i915_gem_exec_fence fence; + while (nfences--) { + struct drm_i915_gem_exec_fence user_fence; struct drm_syncobj *syncobj; + struct dma_fence *fence = NULL; + u64 point; - if (__copy_from_user(&fence, user++, sizeof(fence))) { - err = -EFAULT; - goto err; + if (__copy_from_user(&user_fence, + user_fences++, + sizeof(user_fence))) + return -EFAULT; + + if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) + return -EINVAL; + + if (__get_user(point, user_values++)) + return -EFAULT; + + syncobj = drm_syncobj_find(eb->file, user_fence.handle); + if (!syncobj) { + DRM_DEBUG("Invalid syncobj handle provided\n"); + return -ENOENT; } - if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) { - err = -EINVAL; - goto err; + fence = drm_syncobj_fence_get(syncobj); + + if (!fence && user_fence.flags && + !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { + DRM_DEBUG("Syncobj handle has no fence\n"); + drm_syncobj_put(syncobj); + return -EINVAL; } - syncobj = drm_syncobj_find(file, fence.handle); + if (fence) + err = dma_fence_chain_find_seqno(&fence, point); + + if (err && 
!(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { + DRM_DEBUG("Syncobj handle missing requested point %llu\n", point); + dma_fence_put(fence); + drm_syncobj_put(syncobj); + return err; + } + + /* + * A point might have been signaled already and + * garbage collected from the timeline. In this case + * just ignore the point and carry on. + */ + if (!fence && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { + drm_syncobj_put(syncobj); + continue; + } + + /* + * For timeline syncobjs we need to preallocate chains for + * later signaling. + */ + if (point != 0 && user_fence.flags & I915_EXEC_FENCE_SIGNAL) { + /* + * Waiting and signaling the same point (when point != + * 0) would break the timeline. + */ + if (user_fence.flags & I915_EXEC_FENCE_WAIT) { + DRM_DEBUG("Trying to wait & signal the same timeline point.\n"); + dma_fence_put(fence); + drm_syncobj_put(syncobj); + return -EINVAL; + } + + f->chain_fence = + kmalloc(sizeof(*f->chain_fence), + GFP_KERNEL); + if (!f->chain_fence) { + drm_syncobj_put(syncobj); + dma_fence_put(fence); + return -ENOMEM; + } + } else { + f->chain_fence = NULL; + } + + f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2); + f->dma_fence = fence; + f->value = point; + f++; + eb->num_fences++; + } + + return 0; +} + +static int add_fence_array(struct i915_execbuffer *eb) +{ + struct drm_i915_gem_execbuffer2 *args = eb->args; + struct drm_i915_gem_exec_fence __user *user; + unsigned long num_fences = args->num_cliprects; + struct eb_fence *f; + + if (!(args->flags & I915_EXEC_FENCE_ARRAY)) + return 0; + + if (!num_fences) + return 0; + + /* Check multiplication overflow for access_ok() and kvmalloc_array() */ + BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long)); + if (num_fences > min_t(unsigned long, + ULONG_MAX / sizeof(*user), + SIZE_MAX / sizeof(*f) - eb->num_fences)) + return -EINVAL; + + user = u64_to_user_ptr(args->cliprects_ptr); + if (!access_ok(user, num_fences * sizeof(*user))) + return -EFAULT; + + f = krealloc(eb->fences, + (eb->num_fences + num_fences) * sizeof(*f), + __GFP_NOWARN | GFP_KERNEL); + if (!f) + return -ENOMEM; + + eb->fences = f; + f += eb->num_fences; + while (num_fences--) { + struct drm_i915_gem_exec_fence user_fence; + struct drm_syncobj *syncobj; + struct dma_fence *fence = NULL; + + if (__copy_from_user(&user_fence, user++, sizeof(user_fence))) + return -EFAULT; + + if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) + return -EINVAL; + + syncobj = drm_syncobj_find(eb->file, user_fence.handle); if (!syncobj) { DRM_DEBUG("Invalid syncobj handle provided\n"); - err = -ENOENT; - goto err; + return -ENOENT; + } + + if (user_fence.flags & I915_EXEC_FENCE_WAIT) { + fence = drm_syncobj_fence_get(syncobj); + if (!fence) { + DRM_DEBUG("Syncobj handle has no fence\n"); + drm_syncobj_put(syncobj); + return -EINVAL; + } } BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) & ~__I915_EXEC_FENCE_UNKNOWN_FLAGS); - fences[n] = ptr_pack_bits(syncobj, fence.flags, 2); + f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2); + f->dma_fence = fence; + f->value = 0; + f->chain_fence = NULL; + f++; + eb->num_fences++; } - return fences; - -err: - __free_fence_array(fences, n); - return ERR_PTR(err); + return 0; } -static void -put_fence_array(struct drm_i915_gem_execbuffer2 *args, - struct drm_syncobj **fences) +static void put_fence_array(struct eb_fence *fences, int num_fences) { if (fences) - __free_fence_array(fences, args->num_cliprects); + __free_fence_array(fences, num_fences); } static int -await_fence_array(struct i915_execbuffer *eb, - struct 
drm_syncobj **fences) +await_fence_array(struct i915_execbuffer *eb) { - const unsigned int nfences = eb->args->num_cliprects; unsigned int n; int err; - for (n = 0; n < nfences; n++) { + for (n = 0; n < eb->num_fences; n++) { struct drm_syncobj *syncobj; - struct dma_fence *fence; unsigned int flags; - syncobj = ptr_unpack_bits(fences[n], &flags, 2); - if (!(flags & I915_EXEC_FENCE_WAIT)) - continue; + syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2); - fence = drm_syncobj_fence_get(syncobj); - if (!fence) - return -EINVAL; + if (!eb->fences[n].dma_fence) + continue; - err = i915_request_await_dma_fence(eb->request, fence); - dma_fence_put(fence); + err = i915_request_await_dma_fence(eb->request, + eb->fences[n].dma_fence); if (err < 0) return err; } @@ -2309,26 +3038,47 @@ await_fence_array(struct i915_execbuffer *eb, return 0; } -static void -signal_fence_array(struct i915_execbuffer *eb, - struct drm_syncobj **fences) +static void signal_fence_array(const struct i915_execbuffer *eb) { - const unsigned int nfences = eb->args->num_cliprects; struct dma_fence * const fence = &eb->request->fence; unsigned int n; - for (n = 0; n < nfences; n++) { + for (n = 0; n < eb->num_fences; n++) { struct drm_syncobj *syncobj; unsigned int flags; - syncobj = ptr_unpack_bits(fences[n], &flags, 2); + syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2); if (!(flags & I915_EXEC_FENCE_SIGNAL)) continue; - drm_syncobj_replace_fence(syncobj, fence); + if (eb->fences[n].chain_fence) { + drm_syncobj_add_point(syncobj, + eb->fences[n].chain_fence, + fence, + eb->fences[n].value); + /* + * The chain's ownership is transferred to the + * timeline. + */ + eb->fences[n].chain_fence = NULL; + } else { + drm_syncobj_replace_fence(syncobj, fence); + } } } +static int +parse_timeline_fences(struct i915_user_extension __user *ext, void *data) +{ + struct i915_execbuffer *eb = data; + struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences; + + if (copy_from_user(&timeline_fences, ext, sizeof(timeline_fences))) + return -EFAULT; + + return add_timeline_fence_array(eb, &timeline_fences); +} + static void retire_requests(struct intel_timeline *tl, struct i915_request *end) { struct i915_request *rq, *rn; @@ -2370,12 +3120,37 @@ static void eb_request_add(struct i915_execbuffer *eb) mutex_unlock(&tl->mutex); } +static const i915_user_extension_fn execbuf_extensions[] = { + [DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences, +}; + +static int +parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args, + struct i915_execbuffer *eb) +{ + if (!(args->flags & I915_EXEC_USE_EXTENSIONS)) + return 0; + + /* The execbuf2 extension mechanism reuses cliprects_ptr. So we cannot + * have another flag also using it at the same time. 
+ */ + if (eb->args->flags & I915_EXEC_FENCE_ARRAY) + return -EINVAL; + + if (args->num_cliprects != 0) + return -EINVAL; + + return i915_user_extensions(u64_to_user_ptr(args->cliprects_ptr), + execbuf_extensions, + ARRAY_SIZE(execbuf_extensions), + eb); +} + static int i915_gem_do_execbuffer(struct drm_device *dev, struct drm_file *file, struct drm_i915_gem_execbuffer2 *args, - struct drm_i915_gem_exec_object2 *exec, - struct drm_syncobj **fences) + struct drm_i915_gem_exec_object2 *exec) { struct drm_i915_private *i915 = to_i915(dev); struct i915_execbuffer eb; @@ -2392,10 +3167,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.i915 = i915; eb.file = file; eb.args = args; - if (!(args->flags & I915_EXEC_NO_RELOC)) + if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) args->flags |= __EXEC_HAS_RELOC; eb.exec = exec; + eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1); + eb.vma[0].vma = NULL; + eb.reloc_pool = eb.batch_pool = NULL; + eb.reloc_context = NULL; eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; reloc_cache_init(&eb.reloc_cache, eb.i915); @@ -2405,6 +3184,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.batch_len = args->batch_len; eb.trampoline = NULL; + eb.fences = NULL; + eb.num_fences = 0; + eb.batch_flags = 0; if (args->flags & I915_EXEC_SECURE) { if (INTEL_GEN(i915) >= 11) @@ -2422,14 +3204,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (args->flags & I915_EXEC_IS_PINNED) eb.batch_flags |= I915_DISPATCH_PINNED; + err = parse_execbuf2_extensions(args, &eb); + if (err) + goto err_ext; + + err = add_fence_array(&eb); + if (err) + goto err_ext; + #define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT) if (args->flags & IN_FENCES) { if ((args->flags & IN_FENCES) == IN_FENCES) return -EINVAL; in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); - if (!in_fence) - return -EINVAL; + if (!in_fence) { + err = -EINVAL; + goto err_ext; + } } #undef IN_FENCES @@ -2451,11 +3243,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (unlikely(err)) goto err_destroy; - err = eb_pin_engine(&eb, file, args); + err = eb_select_engine(&eb); if (unlikely(err)) goto err_context; - err = eb_relocate(&eb); + err = eb_lookup_vmas(&eb); + if (err) { + eb_release_vmas(&eb, true); + goto err_engine; + } + + i915_gem_ww_ctx_init(&eb.ww, true); + + err = eb_relocate_parse(&eb); if (err) { /* * If the user expects the execobject.offset and @@ -2468,54 +3268,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_vma; } - if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) { - drm_dbg(&i915->drm, - "Attempting to use self-modifying batch buffer\n"); - err = -EINVAL; - goto err_vma; - } - - if (range_overflows_t(u64, - eb.batch_start_offset, eb.batch_len, - eb.batch->vma->size)) { - drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n"); - err = -EINVAL; - goto err_vma; - } - - if (eb.batch_len == 0) - eb.batch_len = eb.batch->vma->size - eb.batch_start_offset; - - err = eb_parse(&eb); - if (err) - goto err_vma; + ww_acquire_done(&eb.ww.ctx); - /* - * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure - * batch" bit. Hence we need to pin secure batches into the global gtt. - * hsw should have this fixed, but bdw mucks it up again. */ batch = eb.batch->vma; - if (eb.batch_flags & I915_DISPATCH_SECURE) { - struct i915_vma *vma; - - /* - * So on first glance it looks freaky that we pin the batch here - * outside of the reservation loop. 
But: - * - The batch is already pinned into the relevant ppgtt, so we - * already have the backing storage fully allocated. - * - No other BO uses the global gtt (well contexts, but meh), - * so we don't really have issues with multiple objects not - * fitting due to fragmentation. - * So this is actually safe. - */ - vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_parse; - } - - batch = vma; - } /* All GPU relocation batches must be submitted prior to the user rq */ GEM_BUG_ON(eb.reloc_cache.rq); @@ -2524,7 +3279,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.request = i915_request_create(eb.context); if (IS_ERR(eb.request)) { err = PTR_ERR(eb.request); - goto err_batch_unpin; + goto err_vma; } if (in_fence) { @@ -2539,8 +3294,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_request; } - if (fences) { - err = await_fence_array(&eb, fences); + if (eb.fences) { + err = await_fence_array(&eb); if (err) goto err_request; } @@ -2561,18 +3316,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, * to explicitly hold another reference here. */ eb.request->batch = batch; - if (batch->private) - intel_gt_buffer_pool_mark_active(batch->private, eb.request); + if (eb.batch_pool) + intel_gt_buffer_pool_mark_active(eb.batch_pool, eb.request); trace_i915_request_queue(eb.request, eb.batch_flags); err = eb_submit(&eb, batch); err_request: - add_to_client(eb.request, file); i915_request_get(eb.request); eb_request_add(&eb); - if (fences) - signal_fence_array(&eb, fences); + if (eb.fences) + signal_fence_array(&eb); if (out_fence) { if (err == 0) { @@ -2586,16 +3340,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, } i915_request_put(eb.request); -err_batch_unpin: - if (eb.batch_flags & I915_DISPATCH_SECURE) - i915_vma_unpin(batch); -err_parse: - if (batch->private) - intel_gt_buffer_pool_put(batch->private); err_vma: + eb_release_vmas(&eb, true); if (eb.trampoline) i915_vma_unpin(eb.trampoline); - eb_unpin_engine(&eb); + WARN_ON(err == -EDEADLK); + i915_gem_ww_ctx_fini(&eb.ww); + + if (eb.batch_pool) + intel_gt_buffer_pool_put(eb.batch_pool); + if (eb.reloc_pool) + intel_gt_buffer_pool_put(eb.reloc_pool); + if (eb.reloc_context) + intel_context_put(eb.reloc_context); +err_engine: + eb_put_engine(&eb); err_context: i915_gem_context_put(eb.gem_context); err_destroy: @@ -2605,12 +3364,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, put_unused_fd(out_fence_fd); err_in_fence: dma_fence_put(in_fence); +err_ext: + put_fence_array(eb.fences, eb.num_fences); return err; } static size_t eb_element_size(void) { - return sizeof(struct drm_i915_gem_exec_object2); + return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma); } static bool check_buffer_count(size_t count) @@ -2666,7 +3427,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, /* Copy in the exec list from userland */ exec_list = kvmalloc_array(count, sizeof(*exec_list), __GFP_NOWARN | GFP_KERNEL); - exec2_list = kvmalloc_array(count, eb_element_size(), + + /* Allocate extra slots for use by the command parser */ + exec2_list = kvmalloc_array(count + 2, eb_element_size(), __GFP_NOWARN | GFP_KERNEL); if (exec_list == NULL || exec2_list == NULL) { drm_dbg(&i915->drm, @@ -2699,7 +3462,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, exec2_list[i].flags = 0; } - err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL); + err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list); if (exec2.flags & __EXEC_HAS_RELOC) 
{ struct drm_i915_gem_exec_object __user *user_exec_list = u64_to_user_ptr(args->buffers_ptr); @@ -2731,7 +3494,6 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_execbuffer2 *args = data; struct drm_i915_gem_exec_object2 *exec2_list; - struct drm_syncobj **fences = NULL; const size_t count = args->buffer_count; int err; @@ -2744,7 +3506,8 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, if (err) return err; - exec2_list = kvmalloc_array(count, eb_element_size(), + /* Allocate extra slots for use by the command parser */ + exec2_list = kvmalloc_array(count + 2, eb_element_size(), __GFP_NOWARN | GFP_KERNEL); if (exec2_list == NULL) { drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n", @@ -2759,15 +3522,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, return -EFAULT; } - if (args->flags & I915_EXEC_FENCE_ARRAY) { - fences = get_fence_array(args, file); - if (IS_ERR(fences)) { - kvfree(exec2_list); - return PTR_ERR(fences); - } - } - - err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences); + err = i915_gem_do_execbuffer(dev, file, args, exec2_list); /* * Now that we have begun execution of the batchbuffer, we ignore @@ -2808,7 +3563,6 @@ end:; } args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS; - put_fence_array(args, fences); kvfree(exec2_list); return err; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index b23368529a409cabe1dc092b887f96369c934754..3d69e51f3e4df0a3002a14d5ebf1995b35fed9a0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -209,7 +209,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err) switch (err) { default: WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err); - /* fallthrough */ + fallthrough; case -EIO: /* shmemfs failure from swap device */ case -EFAULT: /* purged object */ case -ENODEV: /* bad object, how did you get here! 
*/ @@ -283,37 +283,46 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) struct intel_runtime_pm *rpm = &i915->runtime_pm; struct i915_ggtt *ggtt = &i915->ggtt; bool write = area->vm_flags & VM_WRITE; + struct i915_gem_ww_ctx ww; intel_wakeref_t wakeref; struct i915_vma *vma; pgoff_t page_offset; int srcu; int ret; - /* Sanity check that we allow writing into this object */ - if (i915_gem_object_is_readonly(obj) && write) - return VM_FAULT_SIGBUS; - /* We don't use vmf->pgoff since that has the fake offset */ page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; trace_i915_gem_object_fault(obj, page_offset, true, write); - ret = i915_gem_object_pin_pages(obj); + wakeref = intel_runtime_pm_get(rpm); + + i915_gem_ww_ctx_init(&ww, true); +retry: + ret = i915_gem_object_lock(obj, &ww); if (ret) - goto err; + goto err_rpm; - wakeref = intel_runtime_pm_get(rpm); + /* Sanity check that we allow writing into this object */ + if (i915_gem_object_is_readonly(obj) && write) { + ret = -EFAULT; + goto err_rpm; + } - ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu); + ret = i915_gem_object_pin_pages(obj); if (ret) goto err_rpm; + ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu); + if (ret) + goto err_pages; + /* Now pin it into the GTT as needed */ - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, - PIN_MAPPABLE | - PIN_NONBLOCK /* NOWARN */ | - PIN_NOEVICT); - if (IS_ERR(vma)) { + vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0, + PIN_MAPPABLE | + PIN_NONBLOCK /* NOWARN */ | + PIN_NOEVICT); + if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) { /* Use a partial view if it is bigger than available space */ struct i915_ggtt_view view = compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); @@ -328,11 +337,11 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) * all hope that the hardware is able to track future writes. */ - vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); - if (IS_ERR(vma)) { + vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags); + if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) { flags = PIN_MAPPABLE; view.type = I915_GGTT_VIEW_PARTIAL; - vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); + vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags); } /* The entire mappable GGTT is pinned? Unexpected! */ @@ -389,10 +398,16 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) __i915_vma_unpin(vma); err_reset: intel_gt_reset_unlock(ggtt->vm.gt, srcu); +err_pages: + i915_gem_object_unpin_pages(obj); err_rpm: + if (ret == -EDEADLK) { + ret = i915_gem_ww_ctx_backoff(&ww); + if (!ret) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); intel_runtime_pm_put(rpm, wakeref); - i915_gem_object_unpin_pages(obj); -err: return i915_error_to_vmf_fault(ret); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index e5b9276d254c01d054c5c94ea9acdeb2b578c72a..d46db8d8f38e4e85154f85571ac732b656b50213 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -110,20 +110,44 @@ i915_gem_object_put(struct drm_i915_gem_object *obj) #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv) -static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) +static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww, + bool intr) { - dma_resv_lock(obj->base.resv, NULL); + int ret; + + if (intr) + ret = dma_resv_lock_interruptible(obj->base.resv, ww ? 
&ww->ctx : NULL); + else + ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL); + + if (!ret && ww) + list_add_tail(&obj->obj_link, &ww->obj_list); + if (ret == -EALREADY) + ret = 0; + + if (ret == -EDEADLK) + ww->contended = obj; + + return ret; } -static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj) +static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww) { - return dma_resv_trylock(obj->base.resv); + return __i915_gem_object_lock(obj, ww, ww && ww->intr); } -static inline int -i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj) +static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww) { - return dma_resv_lock_interruptible(obj->base.resv, NULL); + WARN_ON(ww && !ww->intr); + return __i915_gem_object_lock(obj, ww, true); +} + +static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj) +{ + return dma_resv_trylock(obj->base.resv); } static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) @@ -258,6 +282,10 @@ struct page * i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n); +struct page * +i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, + unsigned int n); + dma_addr_t i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, unsigned long n, @@ -408,7 +436,6 @@ static inline void i915_gem_object_finish_access(struct drm_i915_gem_object *obj) { i915_gem_object_unpin_pages(obj); - i915_gem_object_unlock(obj); } static inline struct intel_engine_cs * @@ -431,6 +458,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj) void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level); void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); +void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index bfdb32d46877b541072c56277760e6d3c7584037..d93eb36160c909f7f0a97784b1d20576adcd885b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -14,6 +14,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, struct i915_vma *vma, + struct i915_gem_ww_ctx *ww, u32 value) { struct drm_i915_private *i915 = ce->vm->i915; @@ -39,10 +40,24 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, goto out_pm; } + err = i915_gem_object_lock(pool->obj, ww); + if (err) + goto out_put; + + batch = i915_vma_instance(pool->obj, ce->vm, NULL); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto out_put; + } + + err = i915_vma_pin_ww(batch, ww, 0, 0, PIN_USER); + if (unlikely(err)) + goto out_put; + cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); - goto out_put; + goto out_unpin; } rem = vma->size; @@ -84,19 +99,11 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, intel_gt_chipset_flush(ce->vm->gt); - batch = i915_vma_instance(pool->obj, ce->vm, NULL); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto out_put; - } - - err = i915_vma_pin(batch, 0, 0, PIN_USER); - if (unlikely(err)) - goto out_put; - batch->private = pool; return batch; +out_unpin: + i915_vma_unpin(batch); out_put: intel_gt_buffer_pool_put(pool); out_pm: @@ -108,11 
+115,9 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq) { int err; - i915_vma_lock(vma); err = i915_request_await_object(rq, vma->obj, false); if (err == 0) err = i915_vma_move_to_active(vma, rq, 0); - i915_vma_unlock(vma); if (unlikely(err)) return err; @@ -141,6 +146,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj, struct intel_context *ce, u32 value) { + struct i915_gem_ww_ctx ww; struct i915_request *rq; struct i915_vma *batch; struct i915_vma *vma; @@ -150,17 +156,28 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj, if (IS_ERR(vma)) return PTR_ERR(vma); - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (unlikely(err)) - return err; + i915_gem_ww_ctx_init(&ww, true); + intel_engine_pm_get(ce->engine); +retry: + err = i915_gem_object_lock(obj, &ww); + if (err) + goto out; - batch = intel_emit_vma_fill_blt(ce, vma, value); + err = intel_context_pin_ww(ce, &ww); + if (err) + goto out; + + err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER); + if (err) + goto out_ctx; + + batch = intel_emit_vma_fill_blt(ce, vma, &ww, value); if (IS_ERR(batch)) { err = PTR_ERR(batch); - goto out_unpin; + goto out_vma; } - rq = intel_context_create_request(ce); + rq = i915_request_create(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_batch; @@ -170,11 +187,9 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj, if (unlikely(err)) goto out_request; - i915_vma_lock(vma); err = move_obj_to_gpu(vma->obj, rq, true); if (err == 0) err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - i915_vma_unlock(vma); if (unlikely(err)) goto out_request; @@ -193,8 +208,18 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj, i915_request_add(rq); out_batch: intel_emit_vma_release(ce, batch); -out_unpin: +out_vma: i915_vma_unpin(vma); +out_ctx: + intel_context_unpin(ce); +out: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + intel_engine_pm_put(ce->engine); return err; } @@ -210,6 +235,7 @@ static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size) } struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, + struct i915_gem_ww_ctx *ww, struct i915_vma *src, struct i915_vma *dst) { @@ -236,10 +262,24 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, goto out_pm; } + err = i915_gem_object_lock(pool->obj, ww); + if (err) + goto out_put; + + batch = i915_vma_instance(pool->obj, ce->vm, NULL); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto out_put; + } + + err = i915_vma_pin_ww(batch, ww, 0, 0, PIN_USER); + if (unlikely(err)) + goto out_put; + cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); - goto out_put; + goto out_unpin; } rem = src->size; @@ -296,20 +336,11 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, i915_gem_object_unpin_map(pool->obj); intel_gt_chipset_flush(ce->vm->gt); - - batch = i915_vma_instance(pool->obj, ce->vm, NULL); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto out_put; - } - - err = i915_vma_pin(batch, 0, 0, PIN_USER); - if (unlikely(err)) - goto out_put; - batch->private = pool; return batch; +out_unpin: + i915_vma_unpin(batch); out_put: intel_gt_buffer_pool_put(pool); out_pm: @@ -321,10 +352,9 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src, struct drm_i915_gem_object *dst, struct intel_context *ce) { - struct drm_gem_object *objs[] = { &src->base, &dst->base }; struct i915_address_space *vm = ce->vm; 
struct i915_vma *vma[2], *batch; - struct ww_acquire_ctx acquire; + struct i915_gem_ww_ctx ww; struct i915_request *rq; int err, i; @@ -332,25 +362,36 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src, if (IS_ERR(vma[0])) return PTR_ERR(vma[0]); - err = i915_vma_pin(vma[0], 0, 0, PIN_USER); - if (unlikely(err)) - return err; - vma[1] = i915_vma_instance(dst, vm, NULL); if (IS_ERR(vma[1])) - goto out_unpin_src; + return PTR_ERR(vma[1]); - err = i915_vma_pin(vma[1], 0, 0, PIN_USER); + i915_gem_ww_ctx_init(&ww, true); + intel_engine_pm_get(ce->engine); +retry: + err = i915_gem_object_lock(src, &ww); + if (!err) + err = i915_gem_object_lock(dst, &ww); + if (!err) + err = intel_context_pin_ww(ce, &ww); + if (err) + goto out; + + err = i915_vma_pin_ww(vma[0], &ww, 0, 0, PIN_USER); + if (err) + goto out_ctx; + + err = i915_vma_pin_ww(vma[1], &ww, 0, 0, PIN_USER); if (unlikely(err)) goto out_unpin_src; - batch = intel_emit_vma_copy_blt(ce, vma[0], vma[1]); + batch = intel_emit_vma_copy_blt(ce, &ww, vma[0], vma[1]); if (IS_ERR(batch)) { err = PTR_ERR(batch); goto out_unpin_dst; } - rq = intel_context_create_request(ce); + rq = i915_request_create(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_batch; @@ -360,14 +401,10 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src, if (unlikely(err)) goto out_request; - err = drm_gem_lock_reservations(objs, ARRAY_SIZE(objs), &acquire); - if (unlikely(err)) - goto out_request; - for (i = 0; i < ARRAY_SIZE(vma); i++) { err = move_obj_to_gpu(vma[i]->obj, rq, i); if (unlikely(err)) - goto out_unlock; + goto out_request; } for (i = 0; i < ARRAY_SIZE(vma); i++) { @@ -375,20 +412,19 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src, err = i915_vma_move_to_active(vma[i], rq, flags); if (unlikely(err)) - goto out_unlock; + goto out_request; } if (rq->engine->emit_init_breadcrumb) { err = rq->engine->emit_init_breadcrumb(rq); if (unlikely(err)) - goto out_unlock; + goto out_request; } err = rq->engine->emit_bb_start(rq, batch->node.start, batch->node.size, 0); -out_unlock: - drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire); + out_request: if (unlikely(err)) i915_request_set_error_once(rq, err); @@ -400,6 +436,16 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src, i915_vma_unpin(vma[1]); out_unpin_src: i915_vma_unpin(vma[0]); +out_ctx: + intel_context_unpin(ce); +out: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + intel_engine_pm_put(ce->engine); return err; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h index 8bcd336a90dc6e77d323b2537112bebf14a8ac87..2409fdcccf0ed2d38e8106bd0a4e180dd09f1af7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h @@ -13,12 +13,15 @@ #include "i915_vma.h" struct drm_i915_gem_object; +struct i915_gem_ww_ctx; struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, struct i915_vma *vma, + struct i915_gem_ww_ctx *ww, u32 value); struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, + struct i915_gem_ww_ctx *ww, struct i915_vma *src, struct i915_vma *dst); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 5335f799b5482be1d085d6ca5c6c1441807af8a1..b5c15557cc87b8eaced85355b8e1eb5d4abfcabd 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ 
b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -123,6 +123,15 @@ struct drm_i915_gem_object { struct list_head lut_list; spinlock_t lut_lock; /* guards lut_list */ + /** + * @obj_link: Link into @i915_gem_ww_ctx.obj_list + * + * When we lock this object through i915_gem_object_lock() with a + * context, we add it to the list to ensure we can unlock everything + * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called. + */ + struct list_head obj_link; + /** Stolen memory for this object, instead of being backed by shmem. */ struct drm_mm_node *stolen; union { @@ -282,6 +291,7 @@ struct drm_i915_gem_object { } userptr; unsigned long scratch; + u64 encode; void *gvt_info; }; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 7050519c87a4f53ed788b65bd03ddf3edab385dd..e8a083743e09276920a009dab4a6eeea79138cc9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -276,7 +276,7 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj, switch (type) { default: MISSING_CASE(type); - /* fallthrough - to use PAGE_KERNEL anyway */ + fallthrough; /* to use PAGE_KERNEL anyway */ case I915_MAP_WB: pgprot = PAGE_KERNEL; break; @@ -548,6 +548,20 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n) return nth_page(sg_page(sg), offset); } +/* Like i915_gem_object_get_page(), but mark the returned page dirty */ +struct page * +i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, + unsigned int n) +{ + struct page *page; + + page = i915_gem_object_get_page(obj, n); + if (!obj->mm.dirty) + set_page_dirty(page); + + return page; +} + dma_addr_t i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, unsigned long n, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 3d215164dd5a435653e42858f7ce5c4dae3a742d..40d3e40500fa002ce0b4dcdbfb9840314f9550fc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -84,7 +84,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); drm_WARN_ON(&i915->drm, i915_gem_object_set_to_gtt_domain(obj, false)); i915_gem_object_unlock(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index e0f21f12d3ce4a7bbf9b80b2518e47bd4f956f27..0be5e86833371b16d6a532d83f1a3eb0e37c03f8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -249,7 +249,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *i915, switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { default: MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); - /* fall through */ + fallthrough; case GEN7_STOLEN_RESERVED_1M: *size = 1024 * 1024; break; @@ -416,7 +416,7 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915) case 4: if (!IS_G4X(i915)) break; - /* fall through */ + fallthrough; case 5: g4x_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); @@ -445,7 +445,7 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915) break; default: MISSING_CASE(INTEL_GEN(i915)); - /* fall-through */ + fallthrough; case 11: case 12: icl_get_stolen_reserved(i915, uncore, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c index 
540ef0551789f1c267c25813ad61218a8954c68e..1929d6cf415082ef5931f74896630948d55e1eb9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c @@ -9,6 +9,7 @@ #include <drm/drm_file.h> #include "i915_drv.h" +#include "i915_gem_context.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" @@ -35,9 +36,10 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + const unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; struct drm_i915_file_private *file_priv = file->driver_priv; - unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; - struct i915_request *request, *target = NULL; + struct i915_gem_context *ctx; + unsigned long idx; long ret; /* ABI: return -EIO if already wedged */ @@ -45,27 +47,54 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - spin_lock(&file_priv->mm.lock); - list_for_each_entry(request, &file_priv->mm.request_list, client_link) { - if (time_after_eq(request->emitted_jiffies, recent_enough)) - break; + rcu_read_lock(); + xa_for_each(&file_priv->context_xa, idx, ctx) { + struct i915_gem_engines_iter it; + struct intel_context *ce; - if (target && xchg(&target->file_priv, NULL)) - list_del(&target->client_link); + if (!kref_get_unless_zero(&ctx->ref)) + continue; + rcu_read_unlock(); - target = request; - } - if (target) - i915_request_get(target); - spin_unlock(&file_priv->mm.lock); + for_each_gem_engine(ce, + i915_gem_context_lock_engines(ctx), + it) { + struct i915_request *rq, *target = NULL; + + if (!ce->timeline) + continue; + + mutex_lock(&ce->timeline->mutex); + list_for_each_entry_reverse(rq, + &ce->timeline->requests, + link) { + if (i915_request_completed(rq)) + break; - if (time_after(rq->emitted_jiffies, + recent_enough)) + continue; - if (!target) - return 0; + target = i915_request_get(rq); + break; + } + mutex_unlock(&ce->timeline->mutex); + if (!target) + continue; + + ret = i915_request_wait(target, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); + i915_request_put(target); + if (ret < 0) + break; + } + i915_gem_context_unlock_engines(ctx); + i915_gem_context_put(ctx); + + rcu_read_lock(); + } + rcu_read_unlock(); return ret < 0 ? ret : 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index ff72ee2fd9cd83f3efb233b9eb1039a77cf5438f..ffcaee74a2493beec3d58fb2b224ca75b711118f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -249,7 +249,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, * whilst executing a fenced command for an untiled object. */ - i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); if (i915_gem_object_is_framebuffer(obj)) { i915_gem_object_unlock(obj); return -EBUSY; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 2c2bf24140c97279cae7368cf0842fcf427fed74..12b30075134a81b5cd137bb1ad0268a6da05f1b6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -596,14 +596,6 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); - /* - * Using __get_user_pages_fast() with a read-only - * access is questionable. 
A read-only page may be - * COW-broken, and then this might end up giving - * the wrong side of the COW.. - * - * We may or may not care. - */ if (pvec) { /* defer to worker if malloc fails */ if (!i915_gem_object_is_readonly(obj)) diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 8291ede6902c47ead8720601c30b0ea6626f18e7..5daf4a2be422a598fd80d28d1fc43f4d269f3a87 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -393,7 +393,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) */ for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) { - unsigned int combination = 0; + unsigned int combination = SZ_4K; /* Required for ppGTT */ for (j = 0; j < ARRAY_SIZE(page_sizes); j++) { if (i & BIT(j)) @@ -947,7 +947,7 @@ static int gpu_write(struct intel_context *ce, { int err; - i915_gem_object_lock(vma->obj); + i915_gem_object_lock(vma->obj, NULL); err = i915_gem_object_set_to_gtt_domain(vma->obj, true); i915_gem_object_unlock(vma->obj); if (err) @@ -964,9 +964,10 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) unsigned long n; int err; + i915_gem_object_lock(obj, NULL); err = i915_gem_object_prepare_read(obj, &needs_flush); if (err) - return err; + goto err_unlock; for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n)); @@ -986,6 +987,8 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) } i915_gem_object_finish_access(obj); +err_unlock: + i915_gem_object_unlock(obj); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index 299c29e9ad86aa4c822e028caa9748aab4bf3a77..4e36d4897ea6512b7bdda79f81711741a3243cc9 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -75,7 +75,7 @@ static int __igt_client_fill(struct intel_engine_cs *engine) if (err) goto err_unpin; - i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); err = i915_gem_object_set_to_cpu_domain(obj, false); i915_gem_object_unlock(obj); if (err) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 87d7d8aa080f7dcf6203d42d743e821414d1d38e..7049a6bbc03dd2430eecd4fdfce55e3477a22b23 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -27,9 +27,10 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v) u32 *cpu; int err; + i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush); if (err) - return err; + goto out; page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); @@ -46,7 +47,9 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v) kunmap_atomic(map); i915_gem_object_finish_access(ctx->obj); - return 0; +out: + i915_gem_object_unlock(ctx->obj); + return err; } static int cpu_get(struct context *ctx, unsigned long offset, u32 *v) @@ -57,9 +60,10 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v) u32 *cpu; int err; + i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush); if (err) - return err; + goto out; page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); 
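The coherency selftest conversions above all follow the same convention: i915_gem_object_prepare_read() and i915_gem_object_prepare_write() now expect the caller to already hold the object lock, and i915_gem_object_finish_access() no longer drops it (see the i915_gem_object.h hunk earlier in this patch). A minimal sketch of the resulting calling sequence for a CPU write, assuming a single-page object; the obj variable and the value written are illustrative only:

	unsigned int needs_clflush;
	u32 *map;
	int err;

	i915_gem_object_lock(obj, NULL);	/* NULL: plain dma-resv lock, no ww context */
	err = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (err)
		goto out_unlock;

	/* prepare_write pinned the pages and reported what needs flushing */
	map = kmap_atomic(i915_gem_object_get_page(obj, 0));
	if (needs_clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(map, PAGE_SIZE);
	map[0] = 0xc0ffee;	/* illustrative write */
	if (needs_clflush & CLFLUSH_AFTER)
		drm_clflush_virt_range(map, PAGE_SIZE);
	kunmap_atomic(map);

	i915_gem_object_finish_access(obj);	/* unpins the pages but keeps the lock */
out_unlock:
	i915_gem_object_unlock(obj);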
@@ -73,7 +77,9 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v) kunmap_atomic(map); i915_gem_object_finish_access(ctx->obj); - return 0; +out: + i915_gem_object_unlock(ctx->obj); + return err; } static int gtt_set(struct context *ctx, unsigned long offset, u32 v) @@ -82,7 +88,7 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v) u32 __iomem *map; int err = 0; - i915_gem_object_lock(ctx->obj); + i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_set_to_gtt_domain(ctx->obj, true); i915_gem_object_unlock(ctx->obj); if (err) @@ -115,7 +121,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v) u32 __iomem *map; int err = 0; - i915_gem_object_lock(ctx->obj); + i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_set_to_gtt_domain(ctx->obj, false); i915_gem_object_unlock(ctx->obj); if (err) @@ -147,7 +153,7 @@ static int wc_set(struct context *ctx, unsigned long offset, u32 v) u32 *map; int err; - i915_gem_object_lock(ctx->obj); + i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_set_to_wc_domain(ctx->obj, true); i915_gem_object_unlock(ctx->obj); if (err) @@ -170,7 +176,7 @@ static int wc_get(struct context *ctx, unsigned long offset, u32 *v) u32 *map; int err; - i915_gem_object_lock(ctx->obj); + i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_set_to_wc_domain(ctx->obj, false); i915_gem_object_unlock(ctx->obj); if (err) @@ -193,27 +199,27 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v) u32 *cs; int err; - i915_gem_object_lock(ctx->obj); + i915_gem_object_lock(ctx->obj, NULL); err = i915_gem_object_set_to_gtt_domain(ctx->obj, true); - i915_gem_object_unlock(ctx->obj); if (err) - return err; + goto out_unlock; vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) - return PTR_ERR(vma); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_unlock; + } rq = intel_engine_create_kernel_request(ctx->engine); if (IS_ERR(rq)) { - i915_vma_unpin(vma); - return PTR_ERR(rq); + err = PTR_ERR(rq); + goto out_unpin; } cs = intel_ring_begin(rq, 4); if (IS_ERR(cs)) { - i915_request_add(rq); - i915_vma_unpin(vma); - return PTR_ERR(cs); + err = PTR_ERR(cs); + goto out_rq; } if (INTEL_GEN(ctx->engine->i915) >= 8) { @@ -234,14 +240,16 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v) } intel_ring_advance(rq, cs); - i915_vma_lock(vma); err = i915_request_await_object(rq, vma->obj, true); if (err == 0) err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - i915_vma_unlock(vma); - i915_vma_unpin(vma); +out_rq: i915_request_add(rq); +out_unpin: + i915_vma_unpin(vma); +out_unlock: + i915_gem_object_unlock(ctx->obj); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 7ffc3c751432987c7748401c2f5370db18425e75..99becb86abd331724ee271ecd5760d99a2b3260b 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -461,9 +461,10 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value) unsigned int n, m, need_flush; int err; + i915_gem_object_lock(obj, NULL); err = i915_gem_object_prepare_write(obj, &need_flush); if (err) - return err; + goto out; for (n = 0; n < real_page_count(obj); n++) { u32 *map; @@ -479,7 +480,9 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value) i915_gem_object_finish_access(obj); obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU; 
obj->write_domain = 0; - return 0; +out: + i915_gem_object_unlock(obj); + return err; } static noinline int cpu_check(struct drm_i915_gem_object *obj, @@ -488,9 +491,10 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj, unsigned int n, m, needs_flush; int err; + i915_gem_object_lock(obj, NULL); err = i915_gem_object_prepare_read(obj, &needs_flush); if (err) - return err; + goto out_unlock; for (n = 0; n < real_page_count(obj); n++) { u32 *map; @@ -527,6 +531,8 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj, } i915_gem_object_finish_access(obj); +out_unlock: + i915_gem_object_unlock(obj); return err; } @@ -887,24 +893,15 @@ static int igt_shared_ctx_exec(void *arg) return err; } -static struct i915_vma *rpcs_query_batch(struct i915_vma *vma) +static int rpcs_query_batch(struct drm_i915_gem_object *rpcs, struct i915_vma *vma) { - struct drm_i915_gem_object *obj; u32 *cmd; - int err; - if (INTEL_GEN(vma->vm->i915) < 8) - return ERR_PTR(-EINVAL); + GEM_BUG_ON(INTEL_GEN(vma->vm->i915) < 8); - obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(cmd)) { - err = PTR_ERR(cmd); - goto err; - } + cmd = i915_gem_object_pin_map(rpcs, I915_MAP_WB); + if (IS_ERR(cmd)) + return PTR_ERR(cmd); *cmd++ = MI_STORE_REGISTER_MEM_GEN8; *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE); @@ -912,26 +909,12 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma) *cmd++ = upper_32_bits(vma->node.start); *cmd = MI_BATCH_BUFFER_END; - __i915_gem_object_flush_map(obj, 0, 64); - i915_gem_object_unpin_map(obj); + __i915_gem_object_flush_map(rpcs, 0, 64); + i915_gem_object_unpin_map(rpcs); intel_gt_chipset_flush(vma->vm->gt); - vma = i915_vma_instance(obj, vma->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - goto err; - - return vma; - -err: - i915_gem_object_put(obj); - return ERR_PTR(err); + return 0; } static int @@ -939,52 +922,68 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, struct intel_context *ce, struct i915_request **rq_out) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_request *rq; + struct i915_gem_ww_ctx ww; struct i915_vma *batch; struct i915_vma *vma; + struct drm_i915_gem_object *rpcs; int err; GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); + if (INTEL_GEN(i915) < 8) + return -EINVAL; + vma = i915_vma_instance(obj, ce->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_gtt_domain(obj, false); - i915_gem_object_unlock(obj); - if (err) - return err; - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return err; + rpcs = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(rpcs)) + return PTR_ERR(rpcs); - batch = rpcs_query_batch(vma); + batch = i915_vma_instance(rpcs, ce->vm, NULL); if (IS_ERR(batch)) { err = PTR_ERR(batch); - goto err_vma; + goto err_put; } + i915_gem_ww_ctx_init(&ww, false); +retry: + err = i915_gem_object_lock(obj, &ww); + if (!err) + err = i915_gem_object_lock(rpcs, &ww); + if (!err) + err = i915_gem_object_set_to_gtt_domain(obj, false); + if (!err) + err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER); + if (err) + goto err_put; + + err = i915_vma_pin_ww(batch, &ww, 0, 0, PIN_USER); + if (err) + goto err_vma; + + err = rpcs_query_batch(rpcs, vma); + if (err) + goto err_batch; + rq = i915_request_create(ce); if (IS_ERR(rq)) { err = 
PTR_ERR(rq); goto err_batch; } - i915_vma_lock(batch); err = i915_request_await_object(rq, batch->obj, false); if (err == 0) err = i915_vma_move_to_active(batch, rq, 0); - i915_vma_unlock(batch); if (err) goto skip_request; - i915_vma_lock(vma); err = i915_request_await_object(rq, vma->obj, true); if (err == 0) err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - i915_vma_unlock(vma); if (err) goto skip_request; @@ -1000,23 +999,24 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, if (err) goto skip_request; - i915_vma_unpin_and_release(&batch, 0); - i915_vma_unpin(vma); - *rq_out = i915_request_get(rq); - i915_request_add(rq); - - return 0; - skip_request: - i915_request_set_error_once(rq, err); + if (err) + i915_request_set_error_once(rq, err); i915_request_add(rq); err_batch: - i915_vma_unpin_and_release(&batch, 0); + i915_vma_unpin(batch); err_vma: i915_vma_unpin(vma); - +err_put: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + i915_gem_object_put(rpcs); return err; } @@ -1709,7 +1709,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, i915_request_add(rq); - i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); err = i915_gem_object_set_to_cpu_domain(obj, false); i915_gem_object_unlock(obj); if (err) @@ -1748,7 +1748,7 @@ static int check_scratch_page(struct i915_gem_context *ctx, u32 *out) if (!vm) return -ENODEV; - page = vm->scratch[0].base.page; + page = __px_page(vm->scratch[0]); if (!page) { pr_err("No scratch page!\n"); return -EINVAL; @@ -1914,8 +1914,8 @@ static int mock_context_barrier(void *arg) return -ENOMEM; counter = 0; - err = context_barrier_task(ctx, 0, - NULL, NULL, mock_barrier_task, &counter); + err = context_barrier_task(ctx, 0, NULL, NULL, NULL, + mock_barrier_task, &counter); if (err) { pr_err("Failed at line %d, err=%d\n", __LINE__, err); goto out; @@ -1927,11 +1927,8 @@ static int mock_context_barrier(void *arg) } counter = 0; - err = context_barrier_task(ctx, ALL_ENGINES, - skip_unused_engines, - NULL, - mock_barrier_task, - &counter); + err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines, + NULL, NULL, mock_barrier_task, &counter); if (err) { pr_err("Failed at line %d, err=%d\n", __LINE__, err); goto out; @@ -1951,8 +1948,8 @@ static int mock_context_barrier(void *arg) counter = 0; context_barrier_inject_fault = BIT(RCS0); - err = context_barrier_task(ctx, ALL_ENGINES, - NULL, NULL, mock_barrier_task, &counter); + err = context_barrier_task(ctx, ALL_ENGINES, NULL, NULL, NULL, + mock_barrier_task, &counter); context_barrier_inject_fault = 0; if (err == -ENXIO) err = 0; @@ -1966,11 +1963,8 @@ static int mock_context_barrier(void *arg) goto out; counter = 0; - err = context_barrier_task(ctx, ALL_ENGINES, - skip_unused_engines, - NULL, - mock_barrier_task, - &counter); + err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines, + NULL, NULL, mock_barrier_task, &counter); if (err) { pr_err("Failed at line %d, err=%d\n", __LINE__, err); goto out; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c index 57c14d3340cde50d456b3d123ca7392556a797ff..e1d50a5a1477ca7b2834f809483cf47fc4faedfc 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c @@ -32,37 +32,39 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb, if (IS_ERR(vma)) return PTR_ERR(vma); - err = i915_vma_pin(vma, 0, 0, 
PIN_USER | PIN_HIGH); + err = i915_gem_object_lock(obj, &eb->ww); + if (err) + return err; + + err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, PIN_USER | PIN_HIGH); if (err) return err; /* 8-Byte aligned */ err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0); - if (err) - goto unpin_vma; + if (err <= 0) + goto reloc_err; /* !8-Byte aligned */ err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1); - if (err) - goto unpin_vma; + if (err <= 0) + goto reloc_err; /* Skip to the end of the cmd page */ - i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1; + i = PAGE_SIZE / sizeof(u32) - 1; i -= eb->reloc_cache.rq_size; memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size, MI_NOOP, i); eb->reloc_cache.rq_size += i; - /* Force batch chaining */ + /* Force next batch */ err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2); - if (err) - goto unpin_vma; + if (err <= 0) + goto reloc_err; GEM_BUG_ON(!eb->reloc_cache.rq); rq = i915_request_get(eb->reloc_cache.rq); - err = reloc_gpu_flush(&eb->reloc_cache); - if (err) - goto put_rq; + reloc_gpu_flush(eb, &eb->reloc_cache); GEM_BUG_ON(eb->reloc_cache.rq); err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2); @@ -94,6 +96,11 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb, unpin_vma: i915_vma_unpin(vma); return err; + +reloc_err: + if (!err) + err = -EIO; + goto unpin_vma; } static int igt_gpu_reloc(void *arg) @@ -115,6 +122,8 @@ static int igt_gpu_reloc(void *arg) goto err_scratch; } + intel_gt_pm_get(&eb.i915->gt); + for_each_uabi_engine(eb.engine, eb.i915) { reloc_cache_init(&eb.reloc_cache, eb.i915); memset(map, POISON_INUSE, 4096); @@ -125,15 +134,29 @@ static int igt_gpu_reloc(void *arg) err = PTR_ERR(eb.context); goto err_pm; } + eb.reloc_pool = NULL; + eb.reloc_context = NULL; - err = intel_context_pin(eb.context); - if (err) - goto err_put; + i915_gem_ww_ctx_init(&eb.ww, false); +retry: + err = intel_context_pin_ww(eb.context, &eb.ww); + if (!err) { + err = __igt_gpu_reloc(&eb, scratch); + + intel_context_unpin(eb.context); + } + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&eb.ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&eb.ww); - err = __igt_gpu_reloc(&eb, scratch); + if (eb.reloc_pool) + intel_gt_buffer_pool_put(eb.reloc_pool); + if (eb.reloc_context) + intel_context_put(eb.reloc_context); - intel_context_unpin(eb.context); -err_put: intel_context_put(eb.context); err_pm: intel_engine_pm_put(eb.engine); @@ -144,6 +167,7 @@ static int igt_gpu_reloc(void *arg) if (igt_flush_test(eb.i915)) err = -EIO; + intel_gt_pm_put(&eb.i915->gt); err_scratch: i915_gem_object_put(scratch); return err; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 9c7402ce5bf9c51a855ffc14b23815830c5b6c74..d27d87a678c8fd1e7f8714550575c7ee68bd212a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -103,7 +103,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride); - i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); err = i915_gem_object_set_to_gtt_domain(obj, true); i915_gem_object_unlock(obj); if (err) { @@ -188,7 +188,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj, GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride); - 
i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); err = i915_gem_object_set_to_gtt_domain(obj, true); i915_gem_object_unlock(obj); if (err) { @@ -528,31 +528,42 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) for_each_uabi_engine(engine, i915) { struct i915_request *rq; struct i915_vma *vma; + struct i915_gem_ww_ctx ww; int err; vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); - err = i915_vma_pin(vma, 0, 0, PIN_USER); + i915_gem_ww_ctx_init(&ww, false); +retry: + err = i915_gem_object_lock(obj, &ww); + if (!err) + err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER); if (err) - return err; + goto err; rq = intel_engine_create_kernel_request(engine); if (IS_ERR(rq)) { - i915_vma_unpin(vma); - return PTR_ERR(rq); + err = PTR_ERR(rq); + goto err_unpin; } - i915_vma_lock(vma); err = i915_request_await_object(rq, vma->obj, true); if (err == 0) err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - i915_vma_unlock(vma); i915_request_add(rq); +err_unpin: i915_vma_unpin(vma); +err: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); if (err) return err; } @@ -1123,6 +1134,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915, for_each_uabi_engine(engine, i915) { struct i915_request *rq; struct i915_vma *vma; + struct i915_gem_ww_ctx ww; vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL); if (IS_ERR(vma)) { @@ -1130,9 +1142,13 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915, goto out_unmap; } - err = i915_vma_pin(vma, 0, 0, PIN_USER); + i915_gem_ww_ctx_init(&ww, false); +retry: + err = i915_gem_object_lock(obj, &ww); + if (!err) + err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER); if (err) - goto out_unmap; + goto out_ww; rq = i915_request_create(engine->kernel_context); if (IS_ERR(rq)) { @@ -1140,11 +1156,9 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915, goto out_unpin; } - i915_vma_lock(vma); err = i915_request_await_object(rq, vma->obj, false); if (err == 0) err = i915_vma_move_to_active(vma, rq, 0); - i915_vma_unlock(vma); err = engine->emit_bb_start(rq, vma->node.start, 0, 0); i915_request_get(rq); @@ -1166,6 +1180,13 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915, out_unpin: i915_vma_unpin(vma); +out_ww: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); if (err) goto out_unmap; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c index 34932871b3a5987d238038f6c6541fbe58244e64..a94243dc4c5cfbc375a68459f0c343a1fb15e87b 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c @@ -44,7 +44,7 @@ static int mock_phys_object(void *arg) } /* Make the object dirty so that put_pages must do copy back the data */ - i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); err = i915_gem_object_set_to_gtt_domain(obj, true); i915_gem_object_unlock(obj); if (err) { diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index cdc0b9c54305b79eb6f161cc2fee30d1ee9991f6..fd0d24d28763f11ebadccd62318e2172f731274c 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -16,8 +16,10 @@ static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt, const unsigned int pde, const struct i915_page_table *pt) { + dma_addr_t addr = pt ? 
px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]); + /* Caller needs to make sure the write completes if necessary */ - iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID, + iowrite32(GEN6_PDE_ADDR_ENCODE(addr) | GEN6_PDE_VALID, ppgtt->pd_addr + pde); } @@ -79,7 +81,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, { struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); const unsigned int first_entry = start / I915_GTT_PAGE_SIZE; - const gen6_pte_t scratch_pte = vm->scratch[0].encode; + const gen6_pte_t scratch_pte = vm->scratch[0]->encode; unsigned int pde = first_entry / GEN6_PTES; unsigned int pte = first_entry % GEN6_PTES; unsigned int num_entries = length / I915_GTT_PAGE_SIZE; @@ -90,8 +92,6 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, const unsigned int count = min(num_entries, GEN6_PTES - pte); gen6_pte_t *vaddr; - GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1])); - num_entries -= count; GEM_BUG_ON(count > atomic_read(&pt->used)); @@ -127,7 +127,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, struct sgt_dma iter = sgt_dma(vma); gen6_pte_t *vaddr; - GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]); + GEM_BUG_ON(!pd->entry[act_pt]); vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); do { @@ -177,39 +177,36 @@ static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end) mutex_unlock(&ppgtt->flush); } -static int gen6_alloc_va_range(struct i915_address_space *vm, - u64 start, u64 length) +static void gen6_alloc_va_range(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + u64 start, u64 length) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_table *pt, *alloc = NULL; + struct i915_page_table *pt; bool flush = false; u64 from = start; unsigned int pde; - int ret = 0; spin_lock(&pd->lock); gen6_for_each_pde(pt, pd, start, length, pde) { const unsigned int count = gen6_pte_count(start, length); - if (px_base(pt) == px_base(&vm->scratch[1])) { + if (!pt) { spin_unlock(&pd->lock); - pt = fetch_and_zero(&alloc); - if (!pt) - pt = alloc_pt(vm); - if (IS_ERR(pt)) { - ret = PTR_ERR(pt); - goto unwind_out; - } + pt = stash->pt[0]; + __i915_gem_object_pin_pages(pt->base); + i915_gem_object_make_unshrinkable(pt->base); - fill32_px(pt, vm->scratch[0].encode); + fill32_px(pt, vm->scratch[0]->encode); spin_lock(&pd->lock); - if (pd->entry[pde] == &vm->scratch[1]) { + if (!pd->entry[pde]) { + stash->pt[0] = pt->stash; + atomic_set(&pt->used, 0); pd->entry[pde] = pt; } else { - alloc = pt; pt = pd->entry[pde]; } @@ -226,38 +223,32 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref) gen6_flush_pd(ppgtt, from, start); } - - goto out; - -unwind_out: - gen6_ppgtt_clear_range(vm, from, start - from); -out: - if (alloc) - free_px(vm, alloc); - return ret; } static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) { struct i915_address_space * const vm = &ppgtt->base.vm; - struct i915_page_directory * const pd = ppgtt->base.pd; int ret; - ret = setup_scratch_page(vm, __GFP_HIGHMEM); + ret = setup_scratch_page(vm); if (ret) return ret; - vm->scratch[0].encode = - vm->pte_encode(px_dma(&vm->scratch[0]), + vm->scratch[0]->encode = + vm->pte_encode(px_dma(vm->scratch[0]), I915_CACHE_NONE, PTE_READ_ONLY); - if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) { - cleanup_scratch_page(vm); - return -ENOMEM; + vm->scratch[1] = 
vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); + if (IS_ERR(vm->scratch[1])) + return PTR_ERR(vm->scratch[1]); + + ret = pin_pt_dma(vm, vm->scratch[1]); + if (ret) { + i915_gem_object_put(vm->scratch[1]); + return ret; } - fill32_px(&vm->scratch[1], vm->scratch[0].encode); - memset_p(pd->entry, &vm->scratch[1], I915_PDES); + fill32_px(vm->scratch[1], vm->scratch[0]->encode); return 0; } @@ -265,14 +256,12 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) { struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_dma * const scratch = - px_base(&ppgtt->base.vm.scratch[1]); struct i915_page_table *pt; u32 pde; gen6_for_all_pdes(pt, pd, pde) - if (px_base(pt) != scratch) - free_px(&ppgtt->base.vm, pt); + if (pt) + free_pt(&ppgtt->base.vm, pt); } static void gen6_ppgtt_cleanup(struct i915_address_space *vm) @@ -286,7 +275,8 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) mutex_destroy(&ppgtt->flush); mutex_destroy(&ppgtt->pin_mutex); - kfree(ppgtt->base.pd); + + free_pd(&ppgtt->base.vm, ppgtt->base.pd); } static int pd_vma_set_pages(struct i915_vma *vma) @@ -302,28 +292,26 @@ static void pd_vma_clear_pages(struct i915_vma *vma) vma->pages = NULL; } -static int pd_vma_bind(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 unused) +static void pd_vma_bind(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 unused) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); struct gen6_ppgtt *ppgtt = vma->private; u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; - px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); + ppgtt->pp_dir = ggtt_offset * sizeof(gen6_pte_t) << 10; ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total); - return 0; } static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma) { struct gen6_ppgtt *ppgtt = vma->private; struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_dma * const scratch = - px_base(&ppgtt->base.vm.scratch[1]); struct i915_page_table *pt; unsigned int pde; @@ -332,11 +320,11 @@ static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma) /* Free all no longer used page tables */ gen6_for_all_pdes(pt, ppgtt->base.pd, pde) { - if (px_base(pt) == scratch || atomic_read(&pt->used)) + if (!pt || atomic_read(&pt->used)) continue; - free_px(&ppgtt->base.vm, pt); - pd->entry[pde] = scratch; + free_pt(&ppgtt->base.vm, pt); + pd->entry[pde] = NULL; } ppgtt->scan_for_unused_pt = false; @@ -380,7 +368,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) return vma; } -int gen6_ppgtt_pin(struct i915_ppgtt *base) +int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); int err; @@ -406,7 +394,7 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base) */ err = 0; if (!atomic_read(&ppgtt->pin_count)) - err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH); + err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH); if (!err) atomic_inc(&ppgtt->pin_count); mutex_unlock(&ppgtt->pin_mutex); @@ -448,6 +436,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) mutex_init(&ppgtt->pin_mutex); ppgtt_init(&ppgtt->base, gt); + ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t)); ppgtt->base.vm.top = 1; 
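For reference, the allocation paths in this series (gen6_alloc_va_range above, and the gen8 equivalent below) no longer allocate page tables inline; they pop nodes off a singly linked stash that the caller filled in advance, so the walk itself cannot fail. A minimal sketch of that pop, where the simplified struct pt and struct pt_stash stand in for the real i915 types (only the field names stash->pt[n] and pt->stash mirror the diff; stash_pop() is hypothetical):

	/* Illustrative sketch only, not part of the patch. */
	struct pt {
		struct pt *stash;	/* next preallocated node */
	};

	struct pt_stash {
		struct pt *pt[2];	/* [0]: page tables, [1]: directories */
	};

	/* Pop one preallocated node; the caller reserved enough up front. */
	static struct pt *stash_pop(struct pt_stash *stash, int lvl)
	{
		struct pt *node = stash->pt[lvl];

		stash->pt[lvl] = node->stash;	/* unlink the head */
		return node;
	}

In the real code the pop happens under spin_lock(&pd->lock), which makes the stash single-consumer at that point, so no atomics are needed on the list itself.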
ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND; @@ -456,9 +445,10 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; + ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma; ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; - ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd)); + ppgtt->base.pd = __alloc_pd(I915_PDES); if (!ppgtt->base.pd) { err = -ENOMEM; goto err_free; @@ -479,7 +469,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) err_scratch: free_scratch(&ppgtt->base.vm); err_pd: - kfree(ppgtt->base.pd); + free_pd(&ppgtt->base.vm, ppgtt->base.pd); err_free: mutex_destroy(&ppgtt->pin_mutex); kfree(ppgtt); diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h index 72e481806c962a317854574f2266b20ba0a06157..3357228f3304a0c729a59372a58f8e28c52e1124 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h @@ -8,12 +8,15 @@ #include "intel_gtt.h" +struct i915_gem_ww_ctx; + struct gen6_ppgtt { struct i915_ppgtt base; struct mutex flush; struct i915_vma *vma; gen6_pte_t __iomem *pd_addr; + u32 pp_dir; atomic_t pin_count; struct mutex pin_mutex; @@ -66,7 +69,7 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base) (pt = i915_pt_entry(pd, iter), true); \ ++iter) -int gen6_ppgtt_pin(struct i915_ppgtt *base); +int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww); void gen6_ppgtt_unpin(struct i915_ppgtt *base); void gen6_ppgtt_unpin_all(struct i915_ppgtt *base); void gen6_ppgtt_enable(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 699125928272010e1f60359dbd9d1ae0c6d28142..eb64f474a78c6ac529f74a3c505d73514287e4a1 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -181,7 +181,7 @@ static void __gen8_ppgtt_cleanup(struct i915_address_space *vm, } while (pde++, --count); } - free_px(vm, pd); + free_px(vm, &pd->pt, lvl); } static void gen8_ppgtt_cleanup(struct i915_address_space *vm) @@ -199,7 +199,7 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm, struct i915_page_directory * const pd, u64 start, const u64 end, int lvl) { - const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; + const struct drm_i915_gem_object * const scratch = vm->scratch[lvl]; unsigned int idx, len; GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT); @@ -239,7 +239,7 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm, vaddr = kmap_atomic_px(pt); memset64(vaddr + gen8_pd_index(start, 0), - vm->scratch[0].encode, + vm->scratch[0]->encode, count); kunmap_atomic(vaddr); @@ -248,7 +248,7 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm, } if (release_pd_entry(pd, idx, pt, scratch)) - free_px(vm, pt); + free_px(vm, pt, lvl); } while (idx++, --len); return start; @@ -269,14 +269,12 @@ static void gen8_ppgtt_clear(struct i915_address_space *vm, start, start + length, vm->top); } -static int __gen8_ppgtt_alloc(struct i915_address_space * const vm, - struct i915_page_directory * const pd, - u64 * const start, const u64 end, int lvl) +static void __gen8_ppgtt_alloc(struct i915_address_space * const vm, + struct i915_vm_pt_stash *stash, + struct i915_page_directory * const pd, + u64 * const start, const u64 end, int lvl) { - const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; - struct i915_page_table *alloc = NULL; 
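gen6_ppgtt_pin() above now threads a struct i915_gem_ww_ctx through to i915_ggtt_pin(), and the same acquire/backoff loop recurs throughout this series (igt_gpu_reloc, make_obj_busy, __igt_mmap_gpu, __intel_context_do_pin). A sketch of that recurring shape, assuming a hypothetical do_locked_work() body; the boolean passed to i915_gem_ww_ctx_init() selects interruptible waits:

	static int with_ww_locks(struct drm_i915_gem_object *obj)
	{
		struct i915_gem_ww_ctx ww;
		int err;

		i915_gem_ww_ctx_init(&ww, false);
	retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = do_locked_work(obj, &ww);	/* hypothetical */
		if (err == -EDEADLK) {
			/* Wounded by another transaction: drop all locks, retry. */
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		return err;
	}

On -EDEADLK the transaction lost to an older acquirer; the backoff releases every object locked under this context and waits on the contended one before retrying, which is what guarantees forward progress for wait/wound locking.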
unsigned int idx, len; - int ret = 0; GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT); @@ -297,49 +295,31 @@ static int __gen8_ppgtt_alloc(struct i915_address_space * const vm, DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n", __func__, vm, lvl + 1, idx); - pt = fetch_and_zero(&alloc); - if (lvl) { - if (!pt) { - pt = &alloc_pd(vm)->pt; - if (IS_ERR(pt)) { - ret = PTR_ERR(pt); - goto out; - } - } - - fill_px(pt, vm->scratch[lvl].encode); - } else { - if (!pt) { - pt = alloc_pt(vm); - if (IS_ERR(pt)) { - ret = PTR_ERR(pt); - goto out; - } - } - - if (intel_vgpu_active(vm->i915) || - gen8_pt_count(*start, end) < I915_PDES) - fill_px(pt, vm->scratch[lvl].encode); - } + pt = stash->pt[!!lvl]; + __i915_gem_object_pin_pages(pt->base); + i915_gem_object_make_unshrinkable(pt->base); + + if (lvl || + gen8_pt_count(*start, end) < I915_PDES || + intel_vgpu_active(vm->i915)) + fill_px(pt, vm->scratch[lvl]->encode); spin_lock(&pd->lock); - if (likely(!pd->entry[idx])) + if (likely(!pd->entry[idx])) { + stash->pt[!!lvl] = pt->stash; + atomic_set(&pt->used, 0); set_pd_entry(pd, idx, pt); - else - alloc = pt, pt = pd->entry[idx]; + } else { + pt = pd->entry[idx]; + } } if (lvl) { atomic_inc(&pt->used); spin_unlock(&pd->lock); - ret = __gen8_ppgtt_alloc(vm, as_pd(pt), - start, end, lvl); - if (unlikely(ret)) { - if (release_pd_entry(pd, idx, pt, scratch)) - free_px(vm, pt); - goto out; - } + __gen8_ppgtt_alloc(vm, stash, + as_pd(pt), start, end, lvl); spin_lock(&pd->lock); atomic_dec(&pt->used); @@ -359,18 +339,12 @@ static int __gen8_ppgtt_alloc(struct i915_address_space * const vm, } } while (idx++, --len); spin_unlock(&pd->lock); -out: - if (alloc) - free_px(vm, alloc); - return ret; } -static int gen8_ppgtt_alloc(struct i915_address_space *vm, - u64 start, u64 length) +static void gen8_ppgtt_alloc(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + u64 start, u64 length) { - u64 from; - int err; - GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); GEM_BUG_ON(range_overflows(start, length, vm->total)); @@ -378,25 +352,9 @@ static int gen8_ppgtt_alloc(struct i915_address_space *vm, start >>= GEN8_PTE_SHIFT; length >>= GEN8_PTE_SHIFT; GEM_BUG_ON(length == 0); - from = start; - err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd, - &start, start + length, vm->top); - if (unlikely(err && from != start)) - __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, - from, start, vm->top); - - return err; -} - -static __always_inline void -write_pte(gen8_pte_t *pte, const gen8_pte_t val) -{ - /* Magic delays? Or can we refine these to flush all in one pass? 
*/ - *pte = val; - wmb(); /* cpu to cache */ - clflush(pte); /* cache to memory */ - wmb(); /* visible to all */ + __gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd, + &start, start + length, vm->top); } static __always_inline u64 @@ -415,8 +373,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); do { GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE); - write_pte(&vaddr[gen8_pd_index(idx, 0)], - pte_encode | iter->dma); + vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; iter->dma += I915_GTT_PAGE_SIZE; if (iter->dma >= iter->max) { @@ -439,10 +396,12 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, pd = pdp->entry[gen8_pd_index(idx, 2)]; } + clflush_cache_range(vaddr, PAGE_SIZE); kunmap_atomic(vaddr); vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); } } while (1); + clflush_cache_range(vaddr, PAGE_SIZE); kunmap_atomic(vaddr); return idx; @@ -498,7 +457,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, do { GEM_BUG_ON(iter->sg->length < page_size); - write_pte(&vaddr[index++], encode | iter->dma); + vaddr[index++] = encode | iter->dma; start += page_size; iter->dma += page_size; @@ -523,6 +482,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, } } while (rem >= page_size && index < I915_PDES); + clflush_cache_range(vaddr, PAGE_SIZE); kunmap_atomic(vaddr); /* @@ -554,7 +514,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { u16 i; - encode = vma->vm->scratch[0].encode; + encode = vma->vm->scratch[0]->encode; vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K)); for (i = 1; i < index; i += 16) @@ -608,27 +568,37 @@ static int gen8_init_scratch(struct i915_address_space *vm) GEM_BUG_ON(!clone->has_read_only); vm->scratch_order = clone->scratch_order; - memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch)); - px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */ + for (i = 0; i <= vm->top; i++) + vm->scratch[i] = i915_gem_object_get(clone->scratch[i]); + return 0; } - ret = setup_scratch_page(vm, __GFP_HIGHMEM); + ret = setup_scratch_page(vm); if (ret) return ret; - vm->scratch[0].encode = - gen8_pte_encode(px_dma(&vm->scratch[0]), + vm->scratch[0]->encode = + gen8_pte_encode(px_dma(vm->scratch[0]), I915_CACHE_LLC, vm->has_read_only); for (i = 1; i <= vm->top; i++) { - if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i])))) + struct drm_i915_gem_object *obj; + + obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); + if (IS_ERR(obj)) + goto free_scratch; + + ret = pin_pt_dma(vm, obj); + if (ret) { + i915_gem_object_put(obj); goto free_scratch; + } - fill_px(&vm->scratch[i], vm->scratch[i - 1].encode); - vm->scratch[i].encode = - gen8_pde_encode(px_dma(&vm->scratch[i]), - I915_CACHE_LLC); + fill_px(obj, vm->scratch[i - 1]->encode); + obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC); + + vm->scratch[i] = obj; } return 0; @@ -649,12 +619,20 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) { struct i915_page_directory *pde; + int err; pde = alloc_pd(vm); if (IS_ERR(pde)) return PTR_ERR(pde); - fill_px(pde, vm->scratch[1].encode); + err = pin_pt_dma(vm, pde->pt.base); + if (err) { + i915_gem_object_put(pde->pt.base); + free_pd(vm, pde); + return err; + } + + fill_px(pde, vm->scratch[1]->encode); set_pd_entry(pd, idx, pde); atomic_inc(px_used(pde)); /* keep pinned */ } @@ -668,21 +646,32 @@ gen8_alloc_top_pd(struct i915_address_space *vm) { const 
unsigned int count = gen8_pd_top_count(vm); struct i915_page_directory *pd; + int err; - GEM_BUG_ON(count > ARRAY_SIZE(pd->entry)); + GEM_BUG_ON(count > I915_PDES); - pd = __alloc_pd(offsetof(typeof(*pd), entry[count])); + pd = __alloc_pd(count); if (unlikely(!pd)) return ERR_PTR(-ENOMEM); - if (unlikely(setup_page_dma(vm, px_base(pd)))) { - kfree(pd); - return ERR_PTR(-ENOMEM); + pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); + if (IS_ERR(pd->pt.base)) { + err = PTR_ERR(pd->pt.base); + pd->pt.base = NULL; + goto err_pd; } - fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count); + err = pin_pt_dma(vm, pd->pt.base); + if (err) + goto err_pd; + + fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count); atomic_inc(px_used(pd)); /* mark as pinned */ return pd; + +err_pd: + free_pd(vm, pd); + return ERR_PTR(err); } /* @@ -703,6 +692,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt) ppgtt_init(ppgtt, gt); ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2; + ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t)); /* * From bdw, there is hw support for read-only pages in the PPGTT. @@ -714,12 +704,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt) */ ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12); - /* - * There are only few exceptions for gen >=6. chv and bxt. - * And we are not sure about the latter so play safe for now. - */ - if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915)) - ppgtt->vm.pt_kmap_wc = true; + ppgtt->vm.alloc_pt_dma = alloc_pt_dma; err = gen8_init_scratch(&ppgtt->vm); if (err) diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index 91786310c11484c43b4c5c538ffb83189986d264..d8b206e53660a9d0457b0d790dc8d4e8ea5e2a0f 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -28,6 +28,8 @@ #include "i915_drv.h" #include "i915_trace.h" +#include "intel_breadcrumbs.h" +#include "intel_context.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" @@ -53,33 +55,65 @@ static void irq_disable(struct intel_engine_cs *engine) spin_unlock(&engine->gt->irq_lock); } -static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) +static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) { - struct intel_engine_cs *engine = - container_of(b, struct intel_engine_cs, breadcrumbs); + lockdep_assert_held(&b->irq_lock); + + if (!b->irq_engine || b->irq_armed) + return; + + if (!intel_gt_pm_get_if_awake(b->irq_engine->gt)) + return; + + /* + * The breadcrumb irq will be disarmed on the interrupt after the + * waiters are signaled. This gives us a single interrupt window in + * which we can add a new waiter and avoid the cost of re-enabling + * the irq. + */ + WRITE_ONCE(b->irq_armed, true); + + /* + * Since we are waiting on a request, the GPU should be busy + * and should have its own rpm reference. This is tracked + * by i915->gt.awake, we can forgo holding our own wakref + * for the interrupt as before i915->gt.awake is released (when + * the driver is idle) we disarm the breadcrumbs. 
+ */ + if (!b->irq_enabled++) + irq_enable(b->irq_engine); +} + +static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) +{ lockdep_assert_held(&b->irq_lock); + if (!b->irq_engine || !b->irq_armed) + return; + GEM_BUG_ON(!b->irq_enabled); if (!--b->irq_enabled) - irq_disable(engine); + irq_disable(b->irq_engine); WRITE_ONCE(b->irq_armed, false); - intel_gt_pm_put_async(engine->gt); + intel_gt_pm_put_async(b->irq_engine->gt); } -void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) +static void add_signaling_context(struct intel_breadcrumbs *b, + struct intel_context *ce) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; - unsigned long flags; - - if (!READ_ONCE(b->irq_armed)) - return; + intel_context_get(ce); + list_add_tail(&ce->signal_link, &b->signalers); + if (list_is_first(&ce->signal_link, &b->signalers)) + __intel_breadcrumbs_arm_irq(b); +} - spin_lock_irqsave(&b->irq_lock, flags); - if (b->irq_armed) - __intel_breadcrumbs_disarm_irq(b); - spin_unlock_irqrestore(&b->irq_lock, flags); +static void remove_signaling_context(struct intel_breadcrumbs *b, + struct intel_context *ce) +{ + list_del(&ce->signal_link); + intel_context_put(ce); } static inline bool __request_completed(const struct i915_request *rq) @@ -90,6 +124,9 @@ static inline bool __request_completed(const struct i915_request *rq) __maybe_unused static bool check_signal_order(struct intel_context *ce, struct i915_request *rq) { + if (rq->context != ce) + return false; + if (!list_is_last(&rq->signal_link, &ce->signals) && i915_seqno_passed(rq->fence.seqno, list_next_entry(rq, signal_link)->fence.seqno)) @@ -133,25 +170,21 @@ __dma_fence_signal__notify(struct dma_fence *fence, static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl) { - struct intel_engine_cs *engine = - container_of(b, struct intel_engine_cs, breadcrumbs); - - if (unlikely(intel_engine_is_virtual(engine))) - engine = intel_virtual_engine_get_sibling(engine, 0); - - intel_engine_add_retire(engine, tl); + if (b->irq_engine) + intel_engine_add_retire(b->irq_engine, tl); } -static void __signal_request(struct i915_request *rq, struct list_head *signals) +static bool __signal_request(struct i915_request *rq, struct list_head *signals) { - GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); - if (!__dma_fence_signal(&rq->fence)) - return; + if (!__dma_fence_signal(&rq->fence)) { + i915_request_put(rq); + return false; + } - i915_request_get(rq); list_add_tail(&rq->signal_link, signals); + return true; } static void signal_irq_work(struct irq_work *work) @@ -164,7 +197,7 @@ static void signal_irq_work(struct irq_work *work) spin_lock(&b->irq_lock); - if (b->irq_armed && list_empty(&b->signalers)) + if (list_empty(&b->signalers)) __intel_breadcrumbs_disarm_irq(b); list_splice_init(&b->signaled_requests, &signal); @@ -197,8 +230,8 @@ static void signal_irq_work(struct irq_work *work) /* Advance the list to the first incomplete request */ __list_del_many(&ce->signals, pos); if (&ce->signals == pos) { /* now empty */ - list_del_init(&ce->signal_link); add_retire(b, ce->timeline); + remove_signaling_context(b, ce); } } } @@ -220,116 +253,89 @@ static void signal_irq_work(struct irq_work *work) } } -static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) +struct intel_breadcrumbs * +intel_breadcrumbs_create(struct intel_engine_cs *irq_engine) { - struct intel_engine_cs *engine = - container_of(b, struct intel_engine_cs, 
breadcrumbs); - - lockdep_assert_held(&b->irq_lock); - if (b->irq_armed) - return true; - - if (!intel_gt_pm_get_if_awake(engine->gt)) - return false; - - /* - * The breadcrumb irq will be disarmed on the interrupt after the - * waiters are signaled. This gives us a single interrupt window in - * which we can add a new waiter and avoid the cost of re-enabling - * the irq. - */ - WRITE_ONCE(b->irq_armed, true); - - /* - * Since we are waiting on a request, the GPU should be busy - * and should have its own rpm reference. This is tracked - * by i915->gt.awake, we can forgo holding our own wakref - * for the interrupt as before i915->gt.awake is released (when - * the driver is idle) we disarm the breadcrumbs. - */ - - if (!b->irq_enabled++) - irq_enable(engine); + struct intel_breadcrumbs *b; - return true; -} - -void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) -{ - struct intel_breadcrumbs *b = &engine->breadcrumbs; + b = kzalloc(sizeof(*b), GFP_KERNEL); + if (!b) + return NULL; spin_lock_init(&b->irq_lock); INIT_LIST_HEAD(&b->signalers); INIT_LIST_HEAD(&b->signaled_requests); init_irq_work(&b->irq_work, signal_irq_work); + + b->irq_engine = irq_engine; + + return b; } -void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine) +void intel_breadcrumbs_reset(struct intel_breadcrumbs *b) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; unsigned long flags; + if (!b->irq_engine) + return; + spin_lock_irqsave(&b->irq_lock, flags); if (b->irq_enabled) - irq_enable(engine); + irq_enable(b->irq_engine); else - irq_disable(engine); + irq_disable(b->irq_engine); spin_unlock_irqrestore(&b->irq_lock, flags); } -void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine, - struct intel_context *ce) +void intel_breadcrumbs_park(struct intel_breadcrumbs *b) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; unsigned long flags; - spin_lock_irqsave(&b->irq_lock, flags); - if (!list_empty(&ce->signals)) { - struct i915_request *rq, *next; - - /* Queue for executing the signal callbacks in the irq_work */ - list_for_each_entry_safe(rq, next, &ce->signals, signal_link) { - GEM_BUG_ON(rq->engine != engine); - GEM_BUG_ON(!__request_completed(rq)); - - __signal_request(rq, &b->signaled_requests); - } + if (!READ_ONCE(b->irq_armed)) + return; - INIT_LIST_HEAD(&ce->signals); - list_del_init(&ce->signal_link); + spin_lock_irqsave(&b->irq_lock, flags); + __intel_breadcrumbs_disarm_irq(b); + spin_unlock_irqrestore(&b->irq_lock, flags); + if (!list_empty(&b->signalers)) irq_work_queue(&b->irq_work); - } - spin_unlock_irqrestore(&b->irq_lock, flags); } -void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) +void intel_breadcrumbs_free(struct intel_breadcrumbs *b) { + kfree(b); } -bool i915_request_enable_breadcrumb(struct i915_request *rq) +static void insert_breadcrumb(struct i915_request *rq, + struct intel_breadcrumbs *b) { - lockdep_assert_held(&rq->lock); - - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) - return true; + struct intel_context *ce = rq->context; + struct list_head *pos; - if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) { - struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; - struct intel_context *ce = rq->context; - struct list_head *pos; - - spin_lock(&b->irq_lock); + if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) + return; - if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) - goto unlock; + i915_request_get(rq); - if (!__intel_breadcrumbs_arm_irq(b)) - goto unlock; + /* + * If the 
request is already completed, we can transfer it + * straight onto a signaled list, and queue the irq worker for + * its signal completion. + */ + if (__request_completed(rq)) { + if (__signal_request(rq, &b->signaled_requests)) + irq_work_queue(&b->irq_work); + return; + } + if (list_empty(&ce->signals)) { + add_signaling_context(b, ce); + pos = &ce->signals; + } else { /* * We keep the seqno in retirement order, so we can break * inside intel_engine_signal_breadcrumbs as soon as we've @@ -351,24 +357,75 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq) if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno)) break; } - list_add(&rq->signal_link, pos); - if (pos == &ce->signals) /* catch transitions from empty list */ - list_move_tail(&ce->signal_link, &b->signalers); - GEM_BUG_ON(!check_signal_order(ce, rq)); + } + list_add(&rq->signal_link, pos); + GEM_BUG_ON(!check_signal_order(ce, rq)); + set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); + + /* Check after attaching to irq, interrupt may have already fired. */ + if (__request_completed(rq)) + irq_work_queue(&b->irq_work); +} - set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); -unlock: +bool i915_request_enable_breadcrumb(struct i915_request *rq) +{ + struct intel_breadcrumbs *b; + + /* Serialises with i915_request_retire() using rq->lock */ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) + return true; + + /* + * Peek at i915_request_submit()/i915_request_unsubmit() status. + * + * If the request is not yet active (and not signaled), we will + * attach the breadcrumb later. + */ + if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) + return true; + + /* + * rq->engine is locked by rq->engine->active.lock. That however + * is not known until after rq->engine has been dereferenced and + * the lock acquired. Hence we acquire the lock and then validate + * that rq->engine still matches the lock we hold for it. + * + * Here, we are using the breadcrumb lock as a proxy for the + * rq->engine->active.lock, and we know that since the breadcrumb + * will be serialised within i915_request_submit/i915_request_unsubmit, + * the engine cannot change while active as long as we hold the + * breadcrumb lock on that engine. + * + * From the dma_fence_enable_signaling() path, we are outside of the + * request submit/unsubmit path, and so we must be more careful to + * acquire the right lock. + */ + b = READ_ONCE(rq->engine)->breadcrumbs; + spin_lock(&b->irq_lock); + while (unlikely(b != READ_ONCE(rq->engine)->breadcrumbs)) { spin_unlock(&b->irq_lock); + b = READ_ONCE(rq->engine)->breadcrumbs; + spin_lock(&b->irq_lock); } - return !__request_completed(rq); + /* + * Now that we are finally serialised with request submit/unsubmit, + * [with b->irq_lock] and with i915_request_retire() [via checking + * SIGNALED with rq->lock] confirm the request is indeed active. If + * it is no longer active, the breadcrumb will be attached upon + * i915_request_submit(). 
+ */ + if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) + insert_breadcrumb(rq, b); + + spin_unlock(&b->irq_lock); + + return true; } void i915_request_cancel_breadcrumb(struct i915_request *rq) { - struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; - - lockdep_assert_held(&rq->lock); + struct intel_breadcrumbs *b = rq->engine->breadcrumbs; /* * We must wait for b->irq_lock so that we know the interrupt handler @@ -382,23 +439,19 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq) list_del(&rq->signal_link); if (list_empty(&ce->signals)) - list_del_init(&ce->signal_link); + remove_signaling_context(b, ce); clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); + i915_request_put(rq); } spin_unlock(&b->irq_lock); } -void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, - struct drm_printer *p) +static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p) { - struct intel_breadcrumbs *b = &engine->breadcrumbs; struct intel_context *ce; struct i915_request *rq; - if (list_empty(&b->signalers)) - return; - drm_printf(p, "Signals:\n"); spin_lock_irq(&b->irq_lock); @@ -414,3 +467,17 @@ void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, } spin_unlock_irq(&b->irq_lock); } + +void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, + struct drm_printer *p) +{ + struct intel_breadcrumbs *b; + + b = engine->breadcrumbs; + if (!b) + return; + + drm_printf(p, "IRQ: %s\n", enableddisabled(b->irq_armed)); + if (!list_empty(&b->signalers)) + print_signals(b, p); +} diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h new file mode 100644 index 0000000000000000000000000000000000000000..ed3d1deabfbd8773445e3e13b1afd81c2f830a5a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_BREADCRUMBS__ +#define __INTEL_BREADCRUMBS__ + +#include <linux/irq_work.h> + +#include "intel_engine_types.h" + +struct drm_printer; +struct i915_request; +struct intel_breadcrumbs; + +struct intel_breadcrumbs * +intel_breadcrumbs_create(struct intel_engine_cs *irq_engine); +void intel_breadcrumbs_free(struct intel_breadcrumbs *b); + +void intel_breadcrumbs_reset(struct intel_breadcrumbs *b); +void intel_breadcrumbs_park(struct intel_breadcrumbs *b); + +static inline void +intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine) +{ + irq_work_queue(&engine->breadcrumbs->irq_work); +} + +void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, + struct drm_printer *p); + +bool i915_request_enable_breadcrumb(struct i915_request *request); +void i915_request_cancel_breadcrumb(struct i915_request *request); + +#endif /* __INTEL_BREADCRUMBS__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h new file mode 100644 index 0000000000000000000000000000000000000000..8e53b99426958ffe2ce462890ddc754f34f7cf19 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_BREADCRUMBS_TYPES__ +#define __INTEL_BREADCRUMBS_TYPES__ + +#include <linux/irq_work.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +/* + * Rather than have every client wait upon all user interrupts, + * with the herd waking after every interrupt and each doing the + * heavyweight seqno dance, we delegate the task (of being the + * bottom-half of the user 
interrupt) to the first client. After + * every interrupt, we wake up one client, who does the heavyweight + * coherent seqno read and either goes back to sleep (if incomplete), + * or wakes up all the completed clients in parallel, before then + * transferring the bottom-half status to the next client in the queue. + * + * Compared to walking the entire list of waiters in a single dedicated + * bottom-half, we reduce the latency of the first waiter by avoiding + * a context switch, but incur additional coherent seqno reads when + * following the chain of request breadcrumbs. Since it is most likely + * that we have a single client waiting on each seqno, then reducing + * the overhead of waking that client is much preferred. + */ +struct intel_breadcrumbs { + spinlock_t irq_lock; /* protects the lists used in hardirq context */ + + /* Not all breadcrumbs are attached to physical HW */ + struct intel_engine_cs *irq_engine; + + struct list_head signalers; + struct list_head signaled_requests; + + struct irq_work irq_work; /* for use from inside irq_lock */ + + unsigned int irq_enabled; + + bool irq_armed; +}; + +#endif /* __INTEL_BREADCRUMBS_TYPES__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 52db2bde44a3ab279b433847511c49afa901381c..d301dda1b2617b53f769f47b9ea1d5d1724449d8 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -93,57 +93,210 @@ static void intel_context_active_release(struct intel_context *ce) i915_active_release(&ce->active); } -int __intel_context_do_pin(struct intel_context *ce) +static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww) +{ + unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS; + int err; + + err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH); + if (err) + return err; + + err = i915_active_acquire(&vma->active); + if (err) + goto err_unpin; + + /* + * And mark it as a globally pinned object to let the shrinker know + * it cannot reclaim the object until we release it. 
+ */ + i915_vma_make_unshrinkable(vma); + vma->obj->mm.dirty = true; + + return 0; + +err_unpin: + i915_vma_unpin(vma); + return err; +} + +static void __context_unpin_state(struct i915_vma *vma) +{ + i915_vma_make_shrinkable(vma); + i915_active_release(&vma->active); + __i915_vma_unpin(vma); +} + +static int __ring_active(struct intel_ring *ring, + struct i915_gem_ww_ctx *ww) +{ + int err; + + err = intel_ring_pin(ring, ww); + if (err) + return err; + + err = i915_active_acquire(&ring->vma->active); + if (err) + goto err_pin; + + return 0; + +err_pin: + intel_ring_unpin(ring); + return err; +} + +static void __ring_retire(struct intel_ring *ring) +{ + i915_active_release(&ring->vma->active); + intel_ring_unpin(ring); +} + +static int intel_context_pre_pin(struct intel_context *ce, + struct i915_gem_ww_ctx *ww) { int err; + CE_TRACE(ce, "active\n"); + + err = __ring_active(ce->ring, ww); + if (err) + return err; + + err = intel_timeline_pin(ce->timeline, ww); + if (err) + goto err_ring; + + if (!ce->state) + return 0; + + err = __context_pin_state(ce->state, ww); + if (err) + goto err_timeline; + + + return 0; + +err_timeline: + intel_timeline_unpin(ce->timeline); +err_ring: + __ring_retire(ce->ring); + return err; +} + +static void intel_context_post_unpin(struct intel_context *ce) +{ + if (ce->state) + __context_unpin_state(ce->state); + + intel_timeline_unpin(ce->timeline); + __ring_retire(ce->ring); +} + +int __intel_context_do_pin_ww(struct intel_context *ce, + struct i915_gem_ww_ctx *ww) +{ + bool handoff = false; + void *vaddr; + int err = 0; + if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) { err = intel_context_alloc_state(ce); if (err) return err; } - err = i915_active_acquire(&ce->active); + /* + * We always pin the context/ring/timeline here, to ensure a pin + * refcount for __intel_context_active(), which prevent a lock + * inversion of ce->pin_mutex vs dma_resv_lock(). + */ + + err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww); + if (!err && ce->ring->vma->obj) + err = i915_gem_object_lock(ce->ring->vma->obj, ww); + if (!err && ce->state) + err = i915_gem_object_lock(ce->state->obj, ww); + if (!err) + err = intel_context_pre_pin(ce, ww); if (err) return err; - if (mutex_lock_interruptible(&ce->pin_mutex)) { - err = -EINTR; - goto out_release; - } + err = i915_active_acquire(&ce->active); + if (err) + goto err_ctx_unpin; + + err = ce->ops->pre_pin(ce, ww, &vaddr); + if (err) + goto err_release; + + err = mutex_lock_interruptible(&ce->pin_mutex); + if (err) + goto err_post_unpin; if (unlikely(intel_context_is_closed(ce))) { err = -ENOENT; - goto out_unlock; + goto err_unlock; } if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) { err = intel_context_active_acquire(ce); if (unlikely(err)) - goto out_unlock; + goto err_unlock; - err = ce->ops->pin(ce); - if (unlikely(err)) - goto err_active; + err = ce->ops->pin(ce, vaddr); + if (err) { + intel_context_active_release(ce); + goto err_unlock; + } CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n", i915_ggtt_offset(ce->ring->vma), ce->ring->head, ce->ring->tail); + handoff = true; smp_mb__before_atomic(); /* flush pin before it is visible */ atomic_inc(&ce->pin_count); } GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! 
*/ - GEM_BUG_ON(i915_active_is_idle(&ce->active)); - goto out_unlock; -err_active: - intel_context_active_release(ce); -out_unlock: +err_unlock: mutex_unlock(&ce->pin_mutex); -out_release: +err_post_unpin: + if (!handoff) + ce->ops->post_unpin(ce); +err_release: i915_active_release(&ce->active); +err_ctx_unpin: + intel_context_post_unpin(ce); + + /* + * Unlock the hwsp_ggtt object since it's shared. + * In principle we can unlock all the global state locked above + * since it's pinned and doesn't need fencing, and will + * thus remain resident until it is explicitly unpinned. + */ + i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj); + + return err; +} + +int __intel_context_do_pin(struct intel_context *ce) +{ + struct i915_gem_ww_ctx ww; + int err; + + i915_gem_ww_ctx_init(&ww, true); +retry: + err = __intel_context_do_pin_ww(ce, &ww); + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); return err; } @@ -154,6 +307,7 @@ void intel_context_unpin(struct intel_context *ce) CE_TRACE(ce, "unpin\n"); ce->ops->unpin(ce); + ce->ops->post_unpin(ce); /* * Once released, we may asynchronously drop the active reference. @@ -166,65 +320,6 @@ void intel_context_unpin(struct intel_context *ce) intel_context_put(ce); } -static int __context_pin_state(struct i915_vma *vma) -{ - unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS; - int err; - - err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH); - if (err) - return err; - - err = i915_active_acquire(&vma->active); - if (err) - goto err_unpin; - - /* - * And mark it as a globally pinned object to let the shrinker know - * it cannot reclaim the object until we release it. - */ - i915_vma_make_unshrinkable(vma); - vma->obj->mm.dirty = true; - - return 0; - -err_unpin: - i915_vma_unpin(vma); - return err; -} - -static void __context_unpin_state(struct i915_vma *vma) -{ - i915_vma_make_shrinkable(vma); - i915_active_release(&vma->active); - __i915_vma_unpin(vma); -} - -static int __ring_active(struct intel_ring *ring) -{ - int err; - - err = intel_ring_pin(ring); - if (err) - return err; - - err = i915_active_acquire(&ring->vma->active); - if (err) - goto err_pin; - - return 0; - -err_pin: - intel_ring_unpin(ring); - return err; -} - -static void __ring_retire(struct intel_ring *ring) -{ - i915_active_release(&ring->vma->active); - intel_ring_unpin(ring); -} - __i915_active_call static void __intel_context_retire(struct i915_active *active) { @@ -235,48 +330,29 @@ static void __intel_context_retire(struct i915_active *active) intel_context_get_avg_runtime_ns(ce)); set_bit(CONTEXT_VALID_BIT, &ce->flags); - if (ce->state) - __context_unpin_state(ce->state); - - intel_timeline_unpin(ce->timeline); - __ring_retire(ce->ring); - + intel_context_post_unpin(ce); intel_context_put(ce); } static int __intel_context_active(struct i915_active *active) { struct intel_context *ce = container_of(active, typeof(*ce), active); - int err; - - CE_TRACE(ce, "active\n"); intel_context_get(ce); - err = __ring_active(ce->ring); - if (err) - goto err_put; + /* everything should already be activated by intel_context_pre_pin() */ + GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active)); + __intel_ring_pin(ce->ring); - err = intel_timeline_pin(ce->timeline); - if (err) - goto err_ring; + __intel_timeline_pin(ce->timeline); - if (!ce->state) - return 0; - - err = __context_pin_state(ce->state); - if (err) - goto err_timeline; + if (ce->state) { + 
GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active)); + __i915_vma_pin(ce->state); + i915_vma_make_unshrinkable(ce->state); + } return 0; - -err_timeline: - intel_timeline_unpin(ce->timeline); -err_ring: - __ring_retire(ce->ring); -err_put: - intel_context_put(ce); - return err; } void @@ -382,15 +458,37 @@ int intel_context_prepare_remote_request(struct intel_context *ce, struct i915_request *intel_context_create_request(struct intel_context *ce) { + struct i915_gem_ww_ctx ww; struct i915_request *rq; int err; - err = intel_context_pin(ce); - if (unlikely(err)) - return ERR_PTR(err); + i915_gem_ww_ctx_init(&ww, true); +retry: + err = intel_context_pin_ww(ce, &ww); + if (!err) { + rq = i915_request_create(ce); + intel_context_unpin(ce); + } else if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } else { + rq = ERR_PTR(err); + } + + i915_gem_ww_ctx_fini(&ww); - rq = i915_request_create(ce); - intel_context_unpin(ce); + if (IS_ERR(rq)) + return rq; + + /* + * timeline->mutex should be the inner lock, but is used as outer lock. + * Hack around this to shut up lockdep in selftests.. + */ + lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie); + mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_); + mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_); + rq->cookie = lockdep_pin_lock(&ce->timeline->mutex); return rq; } diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 07be021882ccd3efef37af754d8da2d1a5d89773..fda2eba81e222a65384677d0234a77846ecaed9d 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -25,6 +25,8 @@ ##__VA_ARGS__); \ } while (0) +struct i915_gem_ww_ctx; + void intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine); void intel_context_fini(struct intel_context *ce); @@ -81,6 +83,8 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce) } int __intel_context_do_pin(struct intel_context *ce); +int __intel_context_do_pin_ww(struct intel_context *ce, + struct i915_gem_ww_ctx *ww); static inline bool intel_context_pin_if_active(struct intel_context *ce) { @@ -95,6 +99,15 @@ static inline int intel_context_pin(struct intel_context *ce) return __intel_context_do_pin(ce); } +static inline int intel_context_pin_ww(struct intel_context *ce, + struct i915_gem_ww_ctx *ww) +{ + if (likely(intel_context_pin_if_active(ce))) + return 0; + + return __intel_context_do_pin_ww(ce, ww); +} + static inline void __intel_context_pin(struct intel_context *ce) { GEM_BUG_ON(!intel_context_is_pinned(ce)); diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 4954b0df4864616497bbcce8b9a84d6e0e620446..552cb57a2e8cdc9201cc35717a8bc2a972019cd3 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -23,6 +23,7 @@ DECLARE_EWMA(runtime, 3, 8); struct i915_gem_context; +struct i915_gem_ww_ctx; struct i915_vma; struct intel_context; struct intel_ring; @@ -30,8 +31,10 @@ struct intel_ring; struct intel_context_ops { int (*alloc)(struct intel_context *ce); - int (*pin)(struct intel_context *ce); + int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr); + int (*pin)(struct intel_context *ce, void *vaddr); void (*unpin)(struct intel_context *ce); + void (*post_unpin)(struct intel_context *ce); void (*enter)(struct intel_context *ce); void (*exit)(struct 
intel_context *ce); diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index a9249a23903a92d252b23755dd078a22808d1280..08e2c000dcc3bc8121e49b7a8b7a7eaa74bf371c 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -223,26 +223,6 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine, void intel_engine_init_execlists(struct intel_engine_cs *engine); -void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); -void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); - -void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine); - -static inline void -intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine) -{ - irq_work_queue(&engine->breadcrumbs.irq_work); -} - -void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine); -void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); - -void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine, - struct intel_context *ce); - -void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, - struct drm_printer *p); - static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) { memset(batch, 0, 6 * sizeof(u32)); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index dd1a42c4d344ee70630a3f3b190e5e093933dc0f..5bfb5f7ed02c9aa1e78b0a7a0d06b6a0bc8447ab 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -28,6 +28,7 @@ #include "i915_drv.h" +#include "intel_breadcrumbs.h" #include "intel_context.h" #include "intel_engine.h" #include "intel_engine_pm.h" @@ -213,7 +214,7 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class) break; default: MISSING_CASE(class); - /* fall through */ + fallthrough; case VIDEO_DECODE_CLASS: case VIDEO_ENHANCEMENT_CLASS: case COPY_ENGINE_CLASS: @@ -634,7 +635,7 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine, else flags = PIN_HIGH; - return i915_ggtt_pin(vma, 0, flags); + return i915_ggtt_pin(vma, NULL, 0, flags); } static int init_status_page(struct intel_engine_cs *engine) @@ -700,8 +701,13 @@ static int engine_setup_common(struct intel_engine_cs *engine) if (err) return err; + engine->breadcrumbs = intel_breadcrumbs_create(engine); + if (!engine->breadcrumbs) { + err = -ENOMEM; + goto err_status; + } + intel_engine_init_active(engine, ENGINE_PHYSICAL); - intel_engine_init_breadcrumbs(engine); intel_engine_init_execlists(engine); intel_engine_init_cmd_parser(engine); intel_engine_init__pm(engine); @@ -716,6 +722,10 @@ static int engine_setup_common(struct intel_engine_cs *engine) intel_engine_init_ctx_wa(engine); return 0; + +err_status: + cleanup_status_page(engine); + return err; } struct measure_breadcrumb { @@ -785,9 +795,11 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass) } static struct intel_context * -create_kernel_context(struct intel_engine_cs *engine) +create_pinned_context(struct intel_engine_cs *engine, + unsigned int hwsp, + struct lock_class_key *key, + const char *name) { - static struct lock_class_key kernel; struct intel_context *ce; int err; @@ -796,6 +808,7 @@ create_kernel_context(struct intel_engine_cs *engine) return ce; __set_bit(CONTEXT_BARRIER_BIT, &ce->flags); + ce->timeline = page_pack_bits(NULL, hwsp); err = intel_context_pin(ce); /* perma-pin so it is always available */ if (err) { @@ -809,11 +822,20 @@ 
create_kernel_context(struct intel_engine_cs *engine) * should we need to inject GPU operations during their request * construction. */ - lockdep_set_class(&ce->timeline->mutex, &kernel); + lockdep_set_class_and_name(&ce->timeline->mutex, key, name); return ce; } +static struct intel_context * +create_kernel_context(struct intel_engine_cs *engine) +{ + static struct lock_class_key kernel; + + return create_pinned_context(engine, I915_GEM_HWS_SEQNO_ADDR, + &kernel, "kernel_context"); +} + /** * intel_engines_init_common - initialize cengine state which might require hw access * @engine: Engine to initialize. @@ -902,9 +924,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) tasklet_kill(&engine->execlists.tasklet); /* flush the callback */ cleanup_status_page(engine); + intel_breadcrumbs_free(engine->breadcrumbs); intel_engine_fini_retire(engine); - intel_engine_fini_breadcrumbs(engine); intel_engine_cleanup_cmd_parser(engine); if (engine->default_state) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 8ec3eecf3e397d0cda63fa33d47366e6f6818cbb..f7b2e07e22298d94e82a32377628712d52832017 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -6,6 +6,7 @@ #include "i915_drv.h" +#include "intel_breadcrumbs.h" #include "intel_context.h" #include "intel_engine.h" #include "intel_engine_heartbeat.h" @@ -247,7 +248,7 @@ static int __engine_park(struct intel_wakeref *wf) call_idle_barriers(engine); /* cleanup after wedging */ intel_engine_park_heartbeat(engine); - intel_engine_disarm_breadcrumbs(engine); + intel_breadcrumbs_park(engine->breadcrumbs); /* Must be reset upon idling, or we may miss the busy wakeup. */ GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 8de92fd7d392f85be0dc027c10824aed2cc0e2b6..c400aaa2287becc5e5f65895290d2d42dcad076e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -22,6 +22,7 @@ #include "i915_pmu.h" #include "i915_priolist_types.h" #include "i915_selftest.h" +#include "intel_breadcrumbs_types.h" #include "intel_sseu.h" #include "intel_timeline_types.h" #include "intel_uncore.h" @@ -373,34 +374,8 @@ struct intel_engine_cs { */ struct ewma__engine_latency latency; - /* Rather than have every client wait upon all user interrupts, - * with the herd waking after every interrupt and each doing the - * heavyweight seqno dance, we delegate the task (of being the - * bottom-half of the user interrupt) to the first client. After - * every interrupt, we wake up one client, who does the heavyweight - * coherent seqno read and either goes back to sleep (if incomplete), - * or wakes up all the completed clients in parallel, before then - * transferring the bottom-half status to the next client in the queue. - * - * Compared to walking the entire list of waiters in a single dedicated - * bottom-half, we reduce the latency of the first waiter by avoiding - * a context switch, but incur additional coherent seqno reads when - * following the chain of request breadcrumbs. Since it is most likely - * that we have a single client waiting on each seqno, then reducing - * the overhead of waking that client is much preferred. 
- */ - struct intel_breadcrumbs { - spinlock_t irq_lock; - struct list_head signalers; - - struct list_head signaled_requests; - - struct irq_work irq_work; /* for use from inside irq_lock */ - - unsigned int irq_enabled; - - bool irq_armed; - } breadcrumbs; + /* Keep track of all the seqno used, a trail of breadcrumbs */ + struct intel_breadcrumbs *breadcrumbs; struct intel_engine_pmu { /** diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 62979ea591f05b88917b686ba17cc5e1b39bbc01..81c05f551b9c885aa0566dc3cec607392b9b5060 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -78,8 +78,6 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915) { int ret; - stash_init(&i915->mm.wc_stash); - /* * Note that we use page colouring to enforce a guard page at the * end of the address space. This is required as the CS may prefetch @@ -232,7 +230,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, /* Fill the allocated but "unused" space beyond the end of the buffer */ while (gte < end) - gen8_set_pte(gte++, vm->scratch[0].encode); + gen8_set_pte(gte++, vm->scratch[0]->encode); /* * We want to flush the TLBs only after we're certain all the PTE @@ -283,7 +281,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, /* Fill the allocated but "unused" space beyond the end of the buffer */ while (gte < end) - iowrite32(vm->scratch[0].encode, gte++); + iowrite32(vm->scratch[0]->encode, gte++); /* * We want to flush the TLBs only after we're certain all the PTE @@ -303,7 +301,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); unsigned int first_entry = start / I915_GTT_PAGE_SIZE; unsigned int num_entries = length / I915_GTT_PAGE_SIZE; - const gen8_pte_t scratch_pte = vm->scratch[0].encode; + const gen8_pte_t scratch_pte = vm->scratch[0]->encode; gen8_pte_t __iomem *gtt_base = (gen8_pte_t __iomem *)ggtt->gsm + first_entry; const int max_entries = ggtt_total_entries(ggtt) - first_entry; @@ -401,7 +399,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = vm->scratch[0].encode; + scratch_pte = vm->scratch[0]->encode; for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, &gtt_base[i]); } @@ -436,16 +434,17 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm, intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); } -static int ggtt_bind_vma(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) +static void ggtt_bind_vma(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) { struct drm_i915_gem_object *obj = vma->obj; u32 pte_flags; if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK)) - return 0; + return; /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ pte_flags = 0; @@ -454,8 +453,6 @@ static int ggtt_bind_vma(struct i915_address_space *vm, vm->insert_entries(vm, vma, cache_level, pte_flags); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; - - return 0; } static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) @@ -568,31 +565,25 @@ static int init_ggtt(struct i915_ggtt *ggtt) return ret; } -static int aliasing_gtt_bind_vma(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) 
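This is also why ggtt_bind_vma() and aliasing_gtt_bind_vma() can become void: everything fallible has moved into a separate reservation step, and bind only consumes what was set aside. A sketch of the caller-side convention, mirroring the init_aliasing_ppgtt() sequence just below (preallocate_range() is a hypothetical wrapper, not part of the patch):

	static int preallocate_range(struct i915_address_space *vm,
				     u64 start, u64 length)
	{
		struct i915_vm_pt_stash stash = {};
		int err;

		err = i915_vm_alloc_pt_stash(vm, &stash, length);
		if (err)
			return err;

		err = i915_vm_pin_pt_stash(vm, &stash);
		if (!err)
			vm->allocate_va_range(vm, &stash, start, length);

		i915_vm_free_pt_stash(vm, &stash);	/* return unused nodes */
		return err;
	}

Reserve, pin, then commit: once the stash is pinned, allocate_va_range() cannot fail, and freeing the stash afterwards releases only whatever the walk did not consume.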
+static void aliasing_gtt_bind_vma(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) { u32 pte_flags; - int ret; /* Currently applicable only to VLV */ pte_flags = 0; if (i915_gem_object_is_readonly(vma->obj)) pte_flags |= PTE_READ_ONLY; - if (flags & I915_VMA_LOCAL_BIND) { - struct i915_ppgtt *alias = i915_vm_to_ggtt(vm)->alias; - - ret = ppgtt_bind_vma(&alias->vm, vma, cache_level, flags); - if (ret) - return ret; - } + if (flags & I915_VMA_LOCAL_BIND) + ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm, + stash, vma, cache_level, flags); if (flags & I915_VMA_GLOBAL_BIND) vm->insert_entries(vm, vma, cache_level, pte_flags); - - return 0; } static void aliasing_gtt_unbind_vma(struct i915_address_space *vm, @@ -607,6 +598,7 @@ static void aliasing_gtt_unbind_vma(struct i915_address_space *vm, static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) { + struct i915_vm_pt_stash stash = {}; struct i915_ppgtt *ppgtt; int err; @@ -619,15 +611,21 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) goto err_ppgtt; } + err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total); + if (err) + goto err_ppgtt; + + err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash); + if (err) + goto err_stash; + /* * Note we only pre-allocate as far as the end of the global * GTT. On 48b / 4-level page-tables, the difference is very, * very significant! We have to preallocate as GVT/vgpu does * not like the page directory disappearing. */ - err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total); - if (err) - goto err_ppgtt; + ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total); ggtt->alias = ppgtt; ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; @@ -638,8 +636,11 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; + i915_vm_free_pt_stash(&ppgtt->vm, &stash); return 0; +err_stash: + i915_vm_free_pt_stash(&ppgtt->vm, &stash); err_ppgtt: i915_vm_put(&ppgtt->vm); return err; @@ -715,18 +716,11 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) void i915_ggtt_driver_release(struct drm_i915_private *i915) { struct i915_ggtt *ggtt = &i915->ggtt; - struct pagevec *pvec; fini_aliasing_ppgtt(ggtt); intel_ggtt_fini_fences(ggtt); ggtt_cleanup_hw(ggtt); - - pvec = &i915->mm.wc_stash.pvec; - if (pvec->nr) { - set_pages_array_wb(pvec->pages, pvec->nr); - __pagevec_release(pvec); - } } static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) @@ -789,7 +783,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return -ENOMEM; } - ret = setup_scratch_page(&ggtt->vm, GFP_DMA32); + ret = setup_scratch_page(&ggtt->vm); if (ret) { drm_err(&i915->drm, "Scratch setup failed\n"); /* iounmap will also get called at remove, but meh */ @@ -797,8 +791,8 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return ret; } - ggtt->vm.scratch[0].encode = - ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]), + ggtt->vm.scratch[0]->encode = + ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), I915_CACHE_NONE, 0); return 0; @@ -824,7 +818,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm) struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); iounmap(ggtt->gsm); - cleanup_scratch_page(vm); + free_scratch(vm); } static struct resource pci_resource(struct pci_dev *pdev, int bar) @@ -852,6 +846,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) else size = 
gen8_get_total_gtt_size(snb_gmch_ctl); + ggtt->vm.alloc_pt_dma = alloc_pt_dma; + ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; ggtt->vm.cleanup = gen6_gmch_remove; ggtt->vm.insert_page = gen8_ggtt_insert_page; @@ -1000,6 +996,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) size = gen6_get_total_gtt_size(snb_gmch_ctl); ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; + ggtt->vm.alloc_pt_dma = alloc_pt_dma; + ggtt->vm.clear_range = nop_clear_range; if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915)) ggtt->vm.clear_range = gen6_ggtt_clear_range; @@ -1050,6 +1048,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) ggtt->gmadr = (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end); + ggtt->vm.alloc_pt_dma = alloc_pt_dma; + ggtt->do_idle_maps = needs_idle_maps(i915); ggtt->vm.insert_page = i915_ggtt_insert_page; ggtt->vm.insert_entries = i915_ggtt_insert_entries; @@ -1165,11 +1165,6 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) ggtt->invalidate(ggtt); } -static unsigned int clear_bind(struct i915_vma *vma) -{ - return atomic_fetch_and(~I915_VMA_BIND_MASK, &vma->flags); -} - void i915_ggtt_resume(struct i915_ggtt *ggtt) { struct i915_vma *vma; @@ -1187,11 +1182,13 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt) /* clflush objects bound into the GGTT and rebind them. */ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; - unsigned int was_bound = clear_bind(vma); + unsigned int was_bound = + atomic_read(&vma->flags) & I915_VMA_BIND_MASK; - WARN_ON(i915_vma_bind(vma, - obj ? obj->cache_level : 0, - was_bound, NULL)); + GEM_BUG_ON(!was_bound); + vma->ops->bind_vma(&ggtt->vm, NULL, vma, + obj ? obj->cache_level : 0, + was_bound); if (obj) { /* only used during resume => exclusive access */ flush |= fetch_and_zero(&obj->write_domain); obj->read_domains |= I915_GEM_DOMAIN_GTT; @@ -1437,7 +1434,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) switch (vma->ggtt_view.type) { default: GEM_BUG_ON(vma->ggtt_view.type); - /* fall through */ + fallthrough; case I915_GGTT_VIEW_NORMAL: vma->pages = vma->obj->mm.pages; return 0; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index e0755f1a904b283bbf79c2de04136ecb9d3d0890..39b428c5049c04f1dbb27a917e20c58b015acd03 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -356,7 +356,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) goto err_unref; } - ret = i915_ggtt_pin(vma, 0, PIN_HIGH); + ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH); if (ret) goto err_unref; @@ -406,21 +406,20 @@ static int __engines_record_defaults(struct intel_gt *gt) /* We must be able to switch to something! 
*/ GEM_BUG_ON(!engine->kernel_context); - err = intel_renderstate_init(&so, engine); - if (err) - goto out; - ce = intel_context_create(engine); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out; } - rq = intel_context_create_request(ce); + err = intel_renderstate_init(&so, ce); + if (err) + goto err; + + rq = i915_request_create(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); - intel_context_put(ce); - goto out; + goto err_fini; } err = intel_engine_emit_ctx_wa(rq); @@ -434,9 +433,13 @@ static int __engines_record_defaults(struct intel_gt *gt) err_rq: requests[id] = i915_request_get(rq); i915_request_add(rq); - intel_renderstate_fini(&so); - if (err) +err_fini: + intel_renderstate_fini(&so, ce); +err: + if (err) { + intel_context_put(ce); goto out; + } } /* Flush the default context image to memory, and enable powersaving. */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c index 418ae184cecf8951a94bcb78a5086515d8689a11..4b7671ac5dca417c8fe3a2e9787591f5dd3ba4f2 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c @@ -35,39 +35,65 @@ static void node_free(struct intel_gt_buffer_pool_node *node) { i915_gem_object_put(node->obj); i915_active_fini(&node->active); - kfree(node); + kfree_rcu(node, rcu); } -static void pool_free_work(struct work_struct *wrk) +static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep) { - struct intel_gt_buffer_pool *pool = - container_of(wrk, typeof(*pool), work.work); - struct intel_gt_buffer_pool_node *node, *next; - unsigned long old = jiffies - HZ; + struct intel_gt_buffer_pool_node *node, *stale = NULL; bool active = false; - LIST_HEAD(stale); int n; /* Free buffers that have not been used in the past second */ - spin_lock_irq(&pool->lock); for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { struct list_head *list = &pool->cache_list[n]; - /* Most recent at head; oldest at tail */ - list_for_each_entry_safe_reverse(node, next, list, link) { - if (time_before(node->age, old)) - break; + if (list_empty(list)) + continue; + + if (spin_trylock_irq(&pool->lock)) { + struct list_head *pos; + + /* Most recent at head; oldest at tail */ + list_for_each_prev(pos, list) { + unsigned long age; + + node = list_entry(pos, typeof(*node), link); + + age = READ_ONCE(node->age); + if (!age || jiffies - age < keep) + break; + + /* Check we are the first to claim this node */ + if (!xchg(&node->age, 0)) + break; - list_move(&node->link, &stale); + node->free = stale; + stale = node; + } + if (!list_is_last(pos, list)) + __list_del_many(pos, list); + + spin_unlock_irq(&pool->lock); } + active |= !list_empty(list); } - spin_unlock_irq(&pool->lock); - list_for_each_entry_safe(node, next, &stale, link) + while ((node = stale)) { + stale = stale->free; node_free(node); + } + + return active; +} + +static void pool_free_work(struct work_struct *wrk) +{ + struct intel_gt_buffer_pool *pool = + container_of(wrk, typeof(*pool), work.work); - if (active) + if (pool_free_older_than(pool, HZ)) schedule_delayed_work(&pool->work, round_jiffies_up_relative(HZ)); } @@ -109,8 +135,8 @@ static void pool_retire(struct i915_active *ref) i915_gem_object_make_purgeable(node->obj); spin_lock_irqsave(&pool->lock, flags); - node->age = jiffies; - list_add(&node->link, list); + list_add_rcu(&node->link, list); + WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */ spin_unlock_irqrestore(&pool->lock, flags); schedule_delayed_work(&pool->work, @@ -151,20 
+177,30 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size) struct intel_gt_buffer_pool *pool = &gt->buffer_pool; struct intel_gt_buffer_pool_node *node; struct list_head *list; - unsigned long flags; int ret; size = PAGE_ALIGN(size); list = bucket_for_size(pool, size); - spin_lock_irqsave(&pool->lock, flags); - list_for_each_entry(node, list, link) { + rcu_read_lock(); + list_for_each_entry_rcu(node, list, link) { + unsigned long age; + if (node->obj->base.size < size) continue; - list_del(&node->link); - break; + + age = READ_ONCE(node->age); + if (!age) + continue; + + if (cmpxchg(&node->age, age, 0) == age) { + spin_lock_irq(&pool->lock); + list_del_rcu(&node->link); + spin_unlock_irq(&pool->lock); + break; + } } - spin_unlock_irqrestore(&pool->lock, flags); + rcu_read_unlock(); if (&node->link == list) { node = node_create(pool, size); @@ -192,28 +228,13 @@ void intel_gt_init_buffer_pool(struct intel_gt *gt) INIT_DELAYED_WORK(&pool->work, pool_free_work); } -static void pool_free_imm(struct intel_gt_buffer_pool *pool) -{ - int n; - - spin_lock_irq(&pool->lock); - for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { - struct intel_gt_buffer_pool_node *node, *next; - struct list_head *list = &pool->cache_list[n]; - - list_for_each_entry_safe(node, next, list, link) - node_free(node); - INIT_LIST_HEAD(list); - } - spin_unlock_irq(&pool->lock); -} - void intel_gt_flush_buffer_pool(struct intel_gt *gt) { struct intel_gt_buffer_pool *pool = &gt->buffer_pool; do { - pool_free_imm(pool); + while (pool_free_older_than(pool, 0)) + ; } while (cancel_delayed_work_sync(&pool->work)); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h index e28bdda771ed14371e03276f97f1fc03e669b4cd..bcf1658c96338854a65bde82a371356cf24f763b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h @@ -25,7 +25,11 @@ struct intel_gt_buffer_pool_node { struct i915_active active; struct drm_i915_gem_object *obj; struct list_head link; - struct intel_gt_buffer_pool *pool; + union { + struct intel_gt_buffer_pool *pool; + struct intel_gt_buffer_pool_node *free; + struct rcu_head rcu; + }; unsigned long age; }; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index b05da68e52f4eb32320f273012493219bdcf6b3c..257063a57101bdda2e3766676c2c68849221cdb3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -8,6 +8,7 @@ #include "i915_drv.h" #include "i915_irq.h" +#include "intel_breadcrumbs.h" #include "intel_gt.h" #include "intel_gt_irq.h" #include "intel_uncore.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 2a72cce63fd9131c35cb9f14dae27703e9161418..3f1114b58b0182c67733b81714557b09a61cca33 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -11,160 +11,24 @@ #include "intel_gt.h" #include "intel_gtt.h" -void stash_init(struct pagestash *stash) +struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz) { - pagevec_init(&stash->pvec); - spin_lock_init(&stash->lock); -} - -static struct page *stash_pop_page(struct pagestash *stash) -{ - struct page *page = NULL; - - spin_lock(&stash->lock); - if (likely(stash->pvec.nr)) - page = stash->pvec.pages[--stash->pvec.nr]; - spin_unlock(&stash->lock); - - return page; -} - -static void stash_push_pagevec(struct pagestash *stash, struct pagevec
*pvec) -{ - unsigned int nr; - - spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING); - - nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec)); - memcpy(stash->pvec.pages + stash->pvec.nr, - pvec->pages + pvec->nr - nr, - sizeof(pvec->pages[0]) * nr); - stash->pvec.nr += nr; - - spin_unlock(&stash->lock); - - pvec->nr -= nr; -} - -static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) -{ - struct pagevec stack; - struct page *page; - if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) i915_gem_shrink_all(vm->i915); - page = stash_pop_page(&vm->free_pages); - if (page) - return page; - - if (!vm->pt_kmap_wc) - return alloc_page(gfp); - - /* Look in our global stash of WC pages... */ - page = stash_pop_page(&vm->i915->mm.wc_stash); - if (page) - return page; - - /* - * Otherwise batch allocate pages to amortize cost of set_pages_wc. - * - * We have to be careful as page allocation may trigger the shrinker - * (via direct reclaim) which will fill up the WC stash underneath us. - * So we add our WB pages into a temporary pvec on the stack and merge - * them into the WC stash after all the allocations are complete. - */ - pagevec_init(&stack); - do { - struct page *page; - - page = alloc_page(gfp); - if (unlikely(!page)) - break; - - stack.pages[stack.nr++] = page; - } while (pagevec_space(&stack)); - - if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) { - page = stack.pages[--stack.nr]; - - /* Merge spare WC pages to the global stash */ - if (stack.nr) - stash_push_pagevec(&vm->i915->mm.wc_stash, &stack); - - /* Push any surplus WC pages onto the local VM stash */ - if (stack.nr) - stash_push_pagevec(&vm->free_pages, &stack); - } - - /* Return unwanted leftovers */ - if (unlikely(stack.nr)) { - WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr)); - __pagevec_release(&stack); - } - - return page; + return i915_gem_object_create_internal(vm->i915, sz); } -static void vm_free_pages_release(struct i915_address_space *vm, - bool immediate) +int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj) { - struct pagevec *pvec = &vm->free_pages.pvec; - struct pagevec stack; - - lockdep_assert_held(&vm->free_pages.lock); - GEM_BUG_ON(!pagevec_count(pvec)); - - if (vm->pt_kmap_wc) { - /* - * When we use WC, first fill up the global stash and then - * only if full immediately free the overflow. - */ - stash_push_pagevec(&vm->i915->mm.wc_stash, pvec); - - /* - * As we have made some room in the VM's free_pages, - * we can wait for it to fill again. Unless we are - * inside i915_address_space_fini() and must - * immediately release the pages! - */ - if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1)) - return; + int err; - /* - * We have to drop the lock to allow ourselves to sleep, - * so take a copy of the pvec and clear the stash for - * others to use it as we sleep. - */ - stack = *pvec; - pagevec_reinit(pvec); - spin_unlock(&vm->free_pages.lock); - - pvec = &stack; - set_pages_array_wb(pvec->pages, pvec->nr); - - spin_lock(&vm->free_pages.lock); - } + err = i915_gem_object_pin_pages(obj); + if (err) + return err; - __pagevec_release(pvec); -} - -static void vm_free_page(struct i915_address_space *vm, struct page *page) -{ - /* - * On !llc, we need to change the pages back to WB. We only do so - * in bulk, so we rarely need to change the page attributes here, - * but doing so requires a stop_machine() from deep inside arch/x86/mm. 
- * To make detection of the possible sleep more likely, use an - * unconditional might_sleep() for everybody. - */ - might_sleep(); - spin_lock(&vm->free_pages.lock); - while (!pagevec_space(&vm->free_pages.pvec)) - vm_free_pages_release(vm, false); - GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE); - pagevec_add(&vm->free_pages.pvec, page); - spin_unlock(&vm->free_pages.lock); + i915_gem_object_make_unshrinkable(obj); + return 0; } void __i915_vm_close(struct i915_address_space *vm) @@ -194,14 +58,7 @@ void __i915_vm_close(struct i915_address_space *vm) void i915_address_space_fini(struct i915_address_space *vm) { - spin_lock(&vm->free_pages.lock); - if (pagevec_count(&vm->free_pages.pvec)) - vm_free_pages_release(vm, true); - GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); - spin_unlock(&vm->free_pages.lock); - drm_mm_takedown(&vm->mm); - mutex_destroy(&vm->mutex); } @@ -246,8 +103,6 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass) drm_mm_init(&vm->mm, 0, vm->total); vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; - stash_init(&vm->free_pages); - INIT_LIST_HEAD(&vm->bound_list); } @@ -264,64 +119,50 @@ void clear_pages(struct i915_vma *vma) memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); } -static int __setup_page_dma(struct i915_address_space *vm, - struct i915_page_dma *p, - gfp_t gfp) -{ - p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL); - if (unlikely(!p->page)) - return -ENOMEM; - - p->daddr = dma_map_page_attrs(vm->dma, - p->page, 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL, - DMA_ATTR_SKIP_CPU_SYNC | - DMA_ATTR_NO_WARN); - if (unlikely(dma_mapping_error(vm->dma, p->daddr))) { - vm_free_page(vm, p->page); - return -ENOMEM; - } - - return 0; -} - -int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p) +dma_addr_t __px_dma(struct drm_i915_gem_object *p) { - return __setup_page_dma(vm, p, __GFP_HIGHMEM); + GEM_BUG_ON(!i915_gem_object_has_pages(p)); + return sg_dma_address(p->mm.pages->sgl); } -void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p) +struct page *__px_page(struct drm_i915_gem_object *p) { - dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - vm_free_page(vm, p->page); + GEM_BUG_ON(!i915_gem_object_has_pages(p)); + return sg_page(p->mm.pages->sgl); } void -fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count) +fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count) { - kunmap_atomic(memset64(kmap_atomic(p->page), val, count)); + struct page *page = __px_page(p); + void *vaddr; + + vaddr = kmap(page); + memset64(vaddr, val, count); + clflush_cache_range(vaddr, PAGE_SIZE); + kunmap(page); } -static void poison_scratch_page(struct page *page, unsigned long size) +static void poison_scratch_page(struct drm_i915_gem_object *scratch) { - if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - return; + struct sgt_iter sgt; + struct page *page; + u8 val; - GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); + val = 0; + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + val = POISON_FREE; - do { + for_each_sgt_page(page, sgt, scratch->mm.pages) { void *vaddr; vaddr = kmap(page); - memset(vaddr, POISON_FREE, PAGE_SIZE); + memset(vaddr, val, PAGE_SIZE); kunmap(page); - - page = pfn_to_page(page_to_pfn(page) + 1); - size -= PAGE_SIZE; - } while (size); + } } -int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) +int setup_scratch_page(struct i915_address_space *vm) { unsigned long size; @@ -338,21 +179,27 @@ int 
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) */ size = I915_GTT_PAGE_SIZE_4K; if (i915_vm_is_4lvl(vm) && - HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) { + HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) size = I915_GTT_PAGE_SIZE_64K; - gfp |= __GFP_NOWARN; - } - gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL; do { - unsigned int order = get_order(size); - struct page *page; - dma_addr_t addr; + struct drm_i915_gem_object *obj; - page = alloc_pages(gfp, order); - if (unlikely(!page)) + obj = vm->alloc_pt_dma(vm, size); + if (IS_ERR(obj)) goto skip; + if (pin_pt_dma(vm, obj)) + goto skip_obj; + + /* We need a single contiguous page for our scratch */ + if (obj->mm.page_sizes.sg < size) + goto skip_obj; + + /* And it needs to be correspondingly aligned */ + if (__px_dma(obj) & (size - 1)) + goto skip_obj; + /* * Use a non-zero scratch page for debugging. * @@ -362,61 +209,28 @@ int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) * should it ever be accidentally used, the effect should be * fairly benign. */ - poison_scratch_page(page, size); - - addr = dma_map_page_attrs(vm->dma, - page, 0, size, - PCI_DMA_BIDIRECTIONAL, - DMA_ATTR_SKIP_CPU_SYNC | - DMA_ATTR_NO_WARN); - if (unlikely(dma_mapping_error(vm->dma, addr))) - goto free_page; - - if (unlikely(!IS_ALIGNED(addr, size))) - goto unmap_page; - - vm->scratch[0].base.page = page; - vm->scratch[0].base.daddr = addr; - vm->scratch_order = order; + poison_scratch_page(obj); + + vm->scratch[0] = obj; + vm->scratch_order = get_order(size); return 0; -unmap_page: - dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL); -free_page: - __free_pages(page, order); +skip_obj: + i915_gem_object_put(obj); skip: if (size == I915_GTT_PAGE_SIZE_4K) return -ENOMEM; size = I915_GTT_PAGE_SIZE_4K; - gfp &= ~__GFP_NOWARN; } while (1); } -void cleanup_scratch_page(struct i915_address_space *vm) -{ - struct i915_page_dma *p = px_base(&vm->scratch[0]); - unsigned int order = vm->scratch_order; - - dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT, - PCI_DMA_BIDIRECTIONAL); - __free_pages(p->page, order); -} - void free_scratch(struct i915_address_space *vm) { int i; - if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */ - return; - - for (i = 1; i <= vm->top; i++) { - if (!px_dma(&vm->scratch[i])) - break; - cleanup_page_dma(vm, px_base(&vm->scratch[i])); - } - - cleanup_scratch_page(vm); + for (i = 0; i <= vm->top; i++) + i915_gem_object_put(vm->scratch[i]); } void gtt_write_workarounds(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index f2b75078e05fa81923a6ecf16f2a1391d0e09820..c13c650ced2242c6d70819534ce6f418be195906 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -134,38 +134,29 @@ typedef u64 gen8_pte_t; #define GEN8_PDE_IPS_64K BIT(11) #define GEN8_PDE_PS_2M BIT(7) +enum i915_cache_level; + +struct drm_i915_file_private; +struct drm_i915_gem_object; struct i915_fence_reg; +struct i915_vma; +struct intel_gt; #define for_each_sgt_daddr(__dp, __iter, __sgt) \ __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE) -struct i915_page_dma { - struct page *page; +struct i915_page_table { + struct drm_i915_gem_object *base; union { - dma_addr_t daddr; - - /* - * For gen6/gen7 only. 
This is the offset in the GGTT - * where the page directory entries for PPGTT begin - */ - u32 ggtt_offset; + atomic_t used; + struct i915_page_table *stash; }; }; -struct i915_page_scratch { - struct i915_page_dma base; - u64 encode; -}; - -struct i915_page_table { - struct i915_page_dma base; - atomic_t used; -}; - struct i915_page_directory { struct i915_page_table pt; spinlock_t lock; - void *entry[512]; + void **entry; }; #define __px_choose_expr(x, type, expr, other) \ @@ -176,12 +167,14 @@ struct i915_page_directory { other) #define px_base(px) \ - __px_choose_expr(px, struct i915_page_dma *, __x, \ - __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \ - __px_choose_expr(px, struct i915_page_table *, &__x->base, \ - __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \ - (void)0)))) -#define px_dma(px) (px_base(px)->daddr) + __px_choose_expr(px, struct drm_i915_gem_object *, __x, \ + __px_choose_expr(px, struct i915_page_table *, __x->base, \ + __px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \ + (void)0))) + +struct page *__px_page(struct drm_i915_gem_object *p); +dma_addr_t __px_dma(struct drm_i915_gem_object *p); +#define px_dma(px) (__px_dma(px_base(px))) #define px_pt(px) \ __px_choose_expr(px, struct i915_page_table *, __x, \ @@ -189,19 +182,18 @@ struct i915_page_directory { (void)0)) #define px_used(px) (&px_pt(px)->used) -enum i915_cache_level; - -struct drm_i915_file_private; -struct drm_i915_gem_object; -struct i915_vma; -struct intel_gt; +struct i915_vm_pt_stash { + /* preallocated chains of page tables/directories */ + struct i915_page_table *pt[2]; +}; struct i915_vma_ops { /* Map an object into an address space with the given cache flags. */ - int (*bind_vma)(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags); + void (*bind_vma)(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags); /* * Unmap an object from an address space. This usually consists of * setting the valid PTE entries to a reserved scratch page. @@ -213,13 +205,6 @@ struct i915_vma_ops { void (*clear_pages)(struct i915_vma *vma); }; -struct pagestash { - spinlock_t lock; - struct pagevec pvec; -}; - -void stash_init(struct pagestash *stash); - struct i915_address_space { struct kref ref; struct rcu_work rcu; @@ -256,33 +241,33 @@ struct i915_address_space { #define VM_CLASS_GGTT 0 #define VM_CLASS_PPGTT 1 - struct i915_page_scratch scratch[4]; - unsigned int scratch_order; - unsigned int top; - + struct drm_i915_gem_object *scratch[4]; /** * List of vma currently bound. 
*/ struct list_head bound_list; - struct pagestash free_pages; - /* Global GTT */ bool is_ggtt:1; - /* Some systems require uncached updates of the page directories */ - bool pt_kmap_wc:1; - /* Some systems support read-only mappings for GGTT and/or PPGTT */ bool has_read_only:1; + u8 top; + u8 pd_shift; + u8 scratch_order; + + struct drm_i915_gem_object * + (*alloc_pt_dma)(struct i915_address_space *vm, int sz); + u64 (*pte_encode)(dma_addr_t addr, enum i915_cache_level level, u32 flags); /* Create a valid PTE */ #define PTE_READ_ONLY BIT(0) - int (*allocate_va_range)(struct i915_address_space *vm, - u64 start, u64 length); + void (*allocate_va_range)(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + u64 start, u64 length); void (*clear_range)(struct i915_address_space *vm, u64 start, u64 length); void (*insert_page)(struct i915_address_space *vm, @@ -490,9 +475,9 @@ i915_pd_entry(const struct i915_page_directory * const pdp, static inline dma_addr_t i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) { - struct i915_page_dma *pt = ppgtt->pd->entry[n]; + struct i915_page_table *pt = ppgtt->pd->entry[n]; - return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top])); + return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]); } void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt); @@ -517,13 +502,10 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt); void i915_ggtt_suspend(struct i915_ggtt *gtt); void i915_ggtt_resume(struct i915_ggtt *ggtt); -int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p); -void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p); - -#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page) +#define kmap_atomic_px(px) kmap_atomic(__px_page(px_base(px))) void -fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count); +fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count); #define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64)) #define fill32_px(px, v) do { \ @@ -531,47 +513,51 @@ fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count); fill_px((px), v__ << 32 | v__); \ } while (0) -int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp); -void cleanup_scratch_page(struct i915_address_space *vm); +int setup_scratch_page(struct i915_address_space *vm); void free_scratch(struct i915_address_space *vm); +struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz); struct i915_page_table *alloc_pt(struct i915_address_space *vm); struct i915_page_directory *alloc_pd(struct i915_address_space *vm); -struct i915_page_directory *__alloc_pd(size_t sz); +struct i915_page_directory *__alloc_pd(int npde); -void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd); +int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj); -#define free_px(vm, px) free_pd(vm, px_base(px)) +void free_px(struct i915_address_space *vm, + struct i915_page_table *pt, int lvl); +#define free_pt(vm, px) free_px(vm, px, 0) +#define free_pd(vm, px) free_px(vm, px_pt(px), 1) void __set_pd_entry(struct i915_page_directory * const pd, const unsigned short idx, - struct i915_page_dma * const to, + struct i915_page_table *pt, u64 (*encode)(const dma_addr_t, const enum i915_cache_level)); #define set_pd_entry(pd, idx, to) \ - __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode) + __set_pd_entry((pd), (idx), px_pt(to), 
gen8_pde_encode) void clear_pd_entry(struct i915_page_directory * const pd, const unsigned short idx, - const struct i915_page_scratch * const scratch); + const struct drm_i915_gem_object * const scratch); bool release_pd_entry(struct i915_page_directory * const pd, const unsigned short idx, struct i915_page_table * const pt, - const struct i915_page_scratch * const scratch); + const struct drm_i915_gem_object * const scratch); void gen6_ggtt_invalidate(struct i915_ggtt *ggtt); int ggtt_set_pages(struct i915_vma *vma); int ppgtt_set_pages(struct i915_vma *vma); void clear_pages(struct i915_vma *vma); -int ppgtt_bind_vma(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags); +void ppgtt_bind_vma(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags); void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma); @@ -579,6 +565,14 @@ void gtt_write_workarounds(struct intel_gt *gt); void setup_private_pat(struct intel_uncore *uncore); +int i915_vm_alloc_pt_stash(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + u64 size); +int i915_vm_pin_pt_stash(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash); +void i915_vm_free_pt_stash(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash); + static inline struct sgt_dma { struct scatterlist *sg; dma_addr_t dma, max; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 24322ef08aa40e3d5cb368cd7136445908fe3094..0412a44f25f2e915aeac8edb980f06db8245d928 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -137,6 +137,7 @@ #include "i915_perf.h" #include "i915_trace.h" #include "i915_vgpu.h" +#include "intel_breadcrumbs.h" #include "intel_context.h" #include "intel_engine_pm.h" #include "intel_gt.h" @@ -1148,20 +1149,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) } else { struct intel_engine_cs *owner = rq->context->engine; - /* - * Decouple the virtual breadcrumb before moving it - * back to the virtual engine -- we don't want the - * request to complete in the background and try - * and cancel the breadcrumb on the virtual engine - * (instead of the old engine where it is linked)! - */ - if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &rq->fence.flags)) { - spin_lock_nested(&rq->lock, - SINGLE_DEPTH_NESTING); - i915_request_cancel_breadcrumb(rq); - spin_unlock(&rq->lock); - } WRITE_ONCE(rq->engine, owner); owner->submit_request(rq); active = NULL; @@ -1819,16 +1806,31 @@ static bool virtual_matches(const struct virtual_engine *ve, return true; } -static void virtual_xfer_breadcrumbs(struct virtual_engine *ve) +static void virtual_xfer_context(struct virtual_engine *ve, + struct intel_engine_cs *engine) { + unsigned int n; + + if (likely(engine == ve->siblings[0])) + return; + + GEM_BUG_ON(READ_ONCE(ve->context.inflight)); + if (!intel_engine_has_relative_mmio(engine)) + virtual_update_register_offsets(ve->context.lrc_reg_state, + engine); + /* - * All the outstanding signals on ve->siblings[0] must have - * been completed, just pending the interrupt handler. As those - * signals still refer to the old sibling (via rq->engine), we must - * transfer those to the old irq_worker to keep our locking - * consistent. + * Move the bound engine to the top of the list for + * future execution. 
We then kick this tasklet first + * before checking others, so that we preferentially + * reuse this set of bound registers. */ - intel_engine_transfer_stale_breadcrumbs(ve->siblings[0], &ve->context); + for (n = 1; n < ve->num_siblings; n++) { + if (ve->siblings[n] == engine) { + swap(ve->siblings[n], ve->siblings[0]); + break; + } + } } #define for_each_waiter(p__, rq__) \ @@ -2060,6 +2062,14 @@ static inline void clear_ports(struct i915_request **ports, int count) memset_p((void **)ports, NULL, count); } +static inline void +copy_ports(struct i915_request **dst, struct i915_request **src, int count) +{ + /* A memcpy_p() would be very useful here! */ + while (count--) + WRITE_ONCE(*dst++, *src++); /* avoid write tearing */ +} + static void execlists_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -2271,38 +2281,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine) GEM_BUG_ON(!(rq->execution_mask & engine->mask)); WRITE_ONCE(rq->engine, engine); - if (engine != ve->siblings[0]) { - u32 *regs = ve->context.lrc_reg_state; - unsigned int n; - - GEM_BUG_ON(READ_ONCE(ve->context.inflight)); - - if (!intel_engine_has_relative_mmio(engine)) - virtual_update_register_offsets(regs, - engine); - - if (!list_empty(&ve->context.signals)) - virtual_xfer_breadcrumbs(ve); - + if (__i915_request_submit(rq)) { /* - * Move the bound engine to the top of the list - * for future execution. We then kick this - * tasklet first before checking others, so that - * we preferentially reuse this set of bound - * registers. + * Only after we confirm that we will submit + * this request (i.e. it has not already + * completed), do we want to update the context. + * + * This serves two purposes. It avoids + * unnecessary work if we are resubmitting an + * already completed request after timeslicing. + * But more importantly, it prevents us altering + * ve->siblings[] on an idle context, where + * we may be using ve->siblings[] in + * virtual_context_enter / virtual_context_exit. 
*/ - for (n = 1; n < ve->num_siblings; n++) { - if (ve->siblings[n] == engine) { - swap(ve->siblings[n], - ve->siblings[0]); - break; - } - } - + virtual_xfer_context(ve, engine); GEM_BUG_ON(ve->siblings[0] != engine); - } - if (__i915_request_submit(rq)) { submit = true; last = rq; } @@ -2648,10 +2643,9 @@ static void process_csb(struct intel_engine_cs *engine) /* switch pending to inflight */ GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); - memcpy(execlists->inflight, - execlists->pending, - execlists_num_ports(execlists) * - sizeof(*execlists->pending)); + copy_ports(execlists->inflight, + execlists->pending, + execlists_num_ports(execlists)); smp_wmb(); /* complete the seqlock */ WRITE_ONCE(execlists->active, execlists->inflight); @@ -3309,7 +3303,10 @@ static void execlists_context_unpin(struct intel_context *ce) { check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET, ce->engine); +} +static void execlists_context_post_unpin(struct intel_context *ce) +{ i915_gem_object_unpin_map(ce->state->obj); } @@ -3471,20 +3468,24 @@ __execlists_update_reg_state(const struct intel_context *ce, } static int -__execlists_context_pin(struct intel_context *ce, - struct intel_engine_cs *engine) +execlists_context_pre_pin(struct intel_context *ce, + struct i915_gem_ww_ctx *ww, void **vaddr) { - void *vaddr; - GEM_BUG_ON(!ce->state); GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); - vaddr = i915_gem_object_pin_map(ce->state->obj, - i915_coherent_map_type(engine->i915) | + *vaddr = i915_gem_object_pin_map(ce->state->obj, + i915_coherent_map_type(ce->engine->i915) | I915_MAP_OVERRIDE); - if (IS_ERR(vaddr)) - return PTR_ERR(vaddr); + return PTR_ERR_OR_ZERO(*vaddr); +} + +static int +__execlists_context_pin(struct intel_context *ce, + struct intel_engine_cs *engine, + void *vaddr) +{ ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE; ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET; __execlists_update_reg_state(ce, engine, ce->ring->tail); @@ -3492,9 +3493,9 @@ __execlists_context_pin(struct intel_context *ce, return 0; } -static int execlists_context_pin(struct intel_context *ce) +static int execlists_context_pin(struct intel_context *ce, void *vaddr) { - return __execlists_context_pin(ce, ce->engine); + return __execlists_context_pin(ce, ce->engine, vaddr); } static int execlists_context_alloc(struct intel_context *ce) @@ -3520,8 +3521,10 @@ static void execlists_context_reset(struct intel_context *ce) static const struct intel_context_ops execlists_context_ops = { .alloc = execlists_context_alloc, + .pre_pin = execlists_context_pre_pin, .pin = execlists_context_pin, .unpin = execlists_context_unpin, + .post_unpin = execlists_context_post_unpin, .enter = intel_context_enter_engine, .exit = intel_context_exit_engine, @@ -3885,7 +3888,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) goto err; } - err = i915_ggtt_pin(vma, 0, PIN_HIGH); + err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH); if (err) goto err; @@ -4126,7 +4129,7 @@ static int execlists_resume(struct intel_engine_cs *engine) { intel_mocs_init_engine(engine); - intel_engine_reset_breadcrumbs(engine); + intel_breadcrumbs_reset(engine->breadcrumbs); if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) { struct drm_printer p = drm_debug_printer(__func__); @@ -4555,7 +4558,7 @@ static int gen8_emit_flush_render(struct i915_request *request, vf_flush_wa = true; /* WaForGAMHang:kbl */ - if (IS_KBL_REVID(request->engine->i915, 0, KBL_REVID_B0)) + if (IS_KBL_GT_REVID(request->engine->i915, 0, KBL_REVID_B0)) 
dc_flush_wa = true; } @@ -4757,14 +4760,21 @@ static int gen12_emit_flush(struct i915_request *request, u32 mode) intel_engine_mask_t aux_inv = 0; u32 cmd, *cs; + cmd = 4; + if (mode & EMIT_INVALIDATE) + cmd += 2; if (mode & EMIT_INVALIDATE) aux_inv = request->engine->mask & ~BIT(BCS0); + if (aux_inv) + cmd += 2 * hweight8(aux_inv) + 2; - cs = intel_ring_begin(request, - 4 + (aux_inv ? 2 * hweight8(aux_inv) + 2 : 0)); + cs = intel_ring_begin(request, cmd); if (IS_ERR(cs)) return PTR_ERR(cs); + if (mode & EMIT_INVALIDATE) + *cs++ = preparser_disable(true); + cmd = MI_FLUSH_DW + 1; /* We always require a command barrier so that subsequent @@ -4797,6 +4807,10 @@ static int gen12_emit_flush(struct i915_request *request, u32 mode) } *cs++ = MI_NOOP; } + + if (mode & EMIT_INVALIDATE) + *cs++ = preparser_disable(false); + intel_ring_advance(request, cs); return 0; @@ -5295,6 +5309,14 @@ populate_lr_context(struct intel_context *ce, return 0; } +static struct intel_timeline *pinned_timeline(struct intel_context *ce) +{ + struct intel_timeline *tl = fetch_and_zero(&ce->timeline); + + return intel_timeline_create_from_engine(ce->engine, + page_unmask_bits(tl)); +} + static int __execlists_context_alloc(struct intel_context *ce, struct intel_engine_cs *engine) { @@ -5325,19 +5347,17 @@ static int __execlists_context_alloc(struct intel_context *ce, goto error_deref_obj; } - if (!ce->timeline) { + if (!page_mask_bits(ce->timeline)) { struct intel_timeline *tl; - struct i915_vma *hwsp; /* * Use the static global HWSP for the kernel context, and * a dynamically allocated cacheline for everyone else. */ - hwsp = NULL; - if (unlikely(intel_context_is_barrier(ce))) - hwsp = engine->status_page.vma; - - tl = intel_timeline_create(engine->gt, hwsp); + if (unlikely(ce->timeline)) + tl = pinned_timeline(ce); + else + tl = intel_timeline_create(engine->gt); if (IS_ERR(tl)) { ret = PTR_ERR(tl); goto error_deref_obj; @@ -5443,12 +5463,12 @@ static int virtual_context_alloc(struct intel_context *ce) return __execlists_context_alloc(ce, ve->siblings[0]); } -static int virtual_context_pin(struct intel_context *ce) +static int virtual_context_pin(struct intel_context *ce, void *vaddr) { struct virtual_engine *ve = container_of(ce, typeof(*ve), context); /* Note: we must use a real engine class for setting up reg state */ - return __execlists_context_pin(ce, ve->siblings[0]); + return __execlists_context_pin(ce, ve->siblings[0], vaddr); } static void virtual_context_enter(struct intel_context *ce) @@ -5476,8 +5496,10 @@ static void virtual_context_exit(struct intel_context *ce) static const struct intel_context_ops virtual_context_ops = { .alloc = virtual_context_alloc, + .pre_pin = execlists_context_pre_pin, .pin = virtual_context_pin, .unpin = execlists_context_unpin, + .post_unpin = execlists_context_post_unpin, .enter = virtual_context_enter, .exit = virtual_context_exit, @@ -5711,9 +5733,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings, snprintf(ve->base.name, sizeof(ve->base.name), "virtual"); intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); - intel_engine_init_breadcrumbs(&ve->base); intel_engine_init_execlists(&ve->base); - ve->base.breadcrumbs.irq_armed = true; /* fake HW, used for irq_work */ ve->base.cops = &virtual_context_ops; ve->base.request_alloc = execlists_request_alloc; @@ -5730,6 +5750,12 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings, intel_context_init(&ve->context, &ve->base); + ve->base.breadcrumbs = intel_breadcrumbs_create(NULL); + if 
(!ve->base.breadcrumbs) { + err = -ENOMEM; + goto err_put; + } + for (n = 0; n < count; n++) { struct intel_engine_cs *sibling = siblings[n]; diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c index f0862e924d11a0824e524d46a56bd23242bb4459..46d9aceda64cdd260332794dec7dd974e9e58645 100644 --- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -18,7 +18,8 @@ struct i915_page_table *alloc_pt(struct i915_address_space *vm) if (unlikely(!pt)) return ERR_PTR(-ENOMEM); - if (unlikely(setup_page_dma(vm, &pt->base))) { + pt->base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); + if (IS_ERR(pt->base)) { kfree(pt); return ERR_PTR(-ENOMEM); } @@ -27,14 +28,20 @@ struct i915_page_table *alloc_pt(struct i915_address_space *vm) return pt; } -struct i915_page_directory *__alloc_pd(size_t sz) +struct i915_page_directory *__alloc_pd(int count) { struct i915_page_directory *pd; - pd = kzalloc(sz, I915_GFP_ALLOW_FAIL); + pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL); if (unlikely(!pd)) return NULL; + pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL); + if (unlikely(!pd->entry)) { + kfree(pd); + return NULL; + } + spin_lock_init(&pd->lock); return pd; } @@ -43,11 +50,13 @@ struct i915_page_directory *alloc_pd(struct i915_address_space *vm) { struct i915_page_directory *pd; - pd = __alloc_pd(sizeof(*pd)); + pd = __alloc_pd(I915_PDES); if (unlikely(!pd)) return ERR_PTR(-ENOMEM); - if (unlikely(setup_page_dma(vm, px_base(pd)))) { + pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); + if (IS_ERR(pd->pt.base)) { + kfree(pd->entry); kfree(pd); return ERR_PTR(-ENOMEM); } @@ -55,41 +64,52 @@ struct i915_page_directory *alloc_pd(struct i915_address_space *vm) return pd; } -void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd) +void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl) { - cleanup_page_dma(vm, pd); - kfree(pd); + BUILD_BUG_ON(offsetof(struct i915_page_directory, pt)); + + if (lvl) { + struct i915_page_directory *pd = + container_of(pt, typeof(*pd), pt); + kfree(pd->entry); + } + + if (pt->base) + i915_gem_object_put(pt->base); + + kfree(pt); } static inline void -write_dma_entry(struct i915_page_dma * const pdma, +write_dma_entry(struct drm_i915_gem_object * const pdma, const unsigned short idx, const u64 encoded_entry) { - u64 * const vaddr = kmap_atomic(pdma->page); + u64 * const vaddr = kmap_atomic(__px_page(pdma)); vaddr[idx] = encoded_entry; + clflush_cache_range(&vaddr[idx], sizeof(u64)); kunmap_atomic(vaddr); } void __set_pd_entry(struct i915_page_directory * const pd, const unsigned short idx, - struct i915_page_dma * const to, + struct i915_page_table * const to, u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) { /* Each thread pre-pins the pd, and we may have a thread per pde. 
*/ - GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry)); + GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES); atomic_inc(px_used(pd)); pd->entry[idx] = to; - write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC)); + write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC)); } void clear_pd_entry(struct i915_page_directory * const pd, const unsigned short idx, - const struct i915_page_scratch * const scratch) + const struct drm_i915_gem_object * const scratch) { GEM_BUG_ON(atomic_read(px_used(pd)) == 0); @@ -102,7 +122,7 @@ bool release_pd_entry(struct i915_page_directory * const pd, const unsigned short idx, struct i915_page_table * const pt, - const struct i915_page_scratch * const scratch) + const struct drm_i915_gem_object * const scratch) { bool free = false; @@ -155,19 +175,16 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt) return ppgtt; } -int ppgtt_bind_vma(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) +void ppgtt_bind_vma(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) { u32 pte_flags; - int err; if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) { - err = vm->allocate_va_range(vm, vma->node.start, vma->size); - if (err) - return err; - + vm->allocate_va_range(vm, stash, vma->node.start, vma->size); set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)); } @@ -178,8 +195,6 @@ int ppgtt_bind_vma(struct i915_address_space *vm, vm->insert_entries(vm, vma, cache_level, pte_flags); wmb(); - - return 0; } void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) @@ -188,12 +203,93 @@ void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) vm->clear_range(vm, vma->node.start, vma->size); } +static unsigned long pd_count(u64 size, int shift) +{ + /* Beware later misalignment */ + return (size + 2 * (BIT_ULL(shift) - 1)) >> shift; +} + +int i915_vm_alloc_pt_stash(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + u64 size) +{ + unsigned long count; + int shift, n; + + shift = vm->pd_shift; + if (!shift) + return 0; + + count = pd_count(size, shift); + while (count--) { + struct i915_page_table *pt; + + pt = alloc_pt(vm); + if (IS_ERR(pt)) { + i915_vm_free_pt_stash(vm, stash); + return PTR_ERR(pt); + } + + pt->stash = stash->pt[0]; + stash->pt[0] = pt; + } + + for (n = 1; n < vm->top; n++) { + shift += ilog2(I915_PDES); /* Each PD holds 512 entries */ + count = pd_count(size, shift); + while (count--) { + struct i915_page_directory *pd; + + pd = alloc_pd(vm); + if (IS_ERR(pd)) { + i915_vm_free_pt_stash(vm, stash); + return PTR_ERR(pd); + } + + pd->pt.stash = stash->pt[1]; + stash->pt[1] = &pd->pt; + } + } + + return 0; +} + +int i915_vm_pin_pt_stash(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash) +{ + struct i915_page_table *pt; + int n, err; + + for (n = 0; n < ARRAY_SIZE(stash->pt); n++) { + for (pt = stash->pt[n]; pt; pt = pt->stash) { + err = pin_pt_dma(vm, pt->base); + if (err) + return err; + } + } + + return 0; +} + +void i915_vm_free_pt_stash(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash) +{ + struct i915_page_table *pt; + int n; + + for (n = 0; n < ARRAY_SIZE(stash->pt); n++) { + while ((pt = stash->pt[n])) { + stash->pt[n] = pt->stash; + free_px(vm, pt, n); + } + } +} + int ppgtt_set_pages(struct i915_vma *vma) { GEM_BUG_ON(vma->pages); vma->pages = vma->obj->mm.pages; - 
vma->page_sizes = vma->obj->mm.page_sizes; return 0; diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index 1bfad589c63b348d53b35943302ff3bf0c9f2c98..ea2a77c7b4692a0cfc769c966e8fcbf1e960b1f6 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -27,6 +27,7 @@ #include "i915_drv.h" #include "intel_renderstate.h" +#include "gt/intel_context.h" #include "intel_ring.h" static const struct intel_renderstate_rodata * @@ -157,33 +158,47 @@ static int render_state_setup(struct intel_renderstate *so, #undef OUT_BATCH int intel_renderstate_init(struct intel_renderstate *so, - struct intel_engine_cs *engine) + struct intel_context *ce) { - struct drm_i915_gem_object *obj; + struct intel_engine_cs *engine = ce->engine; + struct drm_i915_gem_object *obj = NULL; int err; memset(so, 0, sizeof(*so)); so->rodata = render_state_get_rodata(engine); - if (!so->rodata) - return 0; + if (so->rodata) { + if (so->rodata->batch_items * 4 > PAGE_SIZE) + return -EINVAL; + + obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); + if (IS_ERR(so->vma)) { + err = PTR_ERR(so->vma); + goto err_obj; + } + } - if (so->rodata->batch_items * 4 > PAGE_SIZE) - return -EINVAL; + i915_gem_ww_ctx_init(&so->ww, true); +retry: + err = intel_context_pin_ww(ce, &so->ww); + if (err) + goto err_fini; - obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); + /* return early if there's nothing to setup */ + if (!err && !so->rodata) + return 0; - so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); - if (IS_ERR(so->vma)) { - err = PTR_ERR(so->vma); - goto err_obj; - } + err = i915_gem_object_lock(so->vma->obj, &so->ww); + if (err) + goto err_context; err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) - goto err_obj; + goto err_context; err = render_state_setup(so, engine->i915); if (err) @@ -193,8 +208,18 @@ int intel_renderstate_init(struct intel_renderstate *so, err_unpin: i915_vma_unpin(so->vma); +err_context: + intel_context_unpin(ce); +err_fini: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&so->ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&so->ww); err_obj: - i915_gem_object_put(obj); + if (obj) + i915_gem_object_put(obj); so->vma = NULL; return err; } @@ -208,11 +233,9 @@ int intel_renderstate_emit(struct intel_renderstate *so, if (!so->vma) return 0; - i915_vma_lock(so->vma); err = i915_request_await_object(rq, so->vma->obj, false); if (err == 0) err = i915_vma_move_to_active(so->vma, rq, 0); - i915_vma_unlock(so->vma); if (err) return err; @@ -233,7 +256,17 @@ int intel_renderstate_emit(struct intel_renderstate *so, return 0; } -void intel_renderstate_fini(struct intel_renderstate *so) +void intel_renderstate_fini(struct intel_renderstate *so, + struct intel_context *ce) { - i915_vma_unpin_and_release(&so->vma, 0); + if (so->vma) { + i915_vma_unpin(so->vma); + i915_vma_close(so->vma); + } + + intel_context_unpin(ce); + i915_gem_ww_ctx_fini(&so->ww); + + if (so->vma) + i915_gem_object_put(so->vma->obj); } diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h index 5700be69a05a65dc97bebf7a6928208cd987ae95..713aa1e86c80be75a464db4ea96df1bde07dd584 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.h +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h @@ -25,9 
+25,10 @@ #define _INTEL_RENDERSTATE_H_ #include <linux/types.h> +#include "i915_gem.h" struct i915_request; -struct intel_engine_cs; +struct intel_context; struct i915_vma; struct intel_renderstate_rodata { @@ -49,6 +50,7 @@ extern const struct intel_renderstate_rodata gen8_null_state; extern const struct intel_renderstate_rodata gen9_null_state; struct intel_renderstate { + struct i915_gem_ww_ctx ww; const struct intel_renderstate_rodata *rodata; struct i915_vma *vma; u32 batch_offset; @@ -58,9 +60,10 @@ struct intel_renderstate { }; int intel_renderstate_init(struct intel_renderstate *so, - struct intel_engine_cs *engine); + struct intel_context *ce); int intel_renderstate_emit(struct intel_renderstate *so, struct i915_request *rq); -void intel_renderstate_fini(struct intel_renderstate *so); +void intel_renderstate_fini(struct intel_renderstate *so, + struct intel_context *ce); #endif /* _INTEL_RENDERSTATE_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 46a5ceffc22f7fd4a02f3489a881e298b1b7cc05..ac36b67fb46bea97b7c34c827bbe1dc20b1dd70c 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -15,6 +15,7 @@ #include "i915_drv.h" #include "i915_gpu_error.h" #include "i915_irq.h" +#include "intel_breadcrumbs.h" #include "intel_engine_pm.h" #include "intel_gt.h" #include "intel_gt_pm.h" diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c index bdb324167ef3356728297edad1d1f512ba0496f9..4034a4bac7f08669927f5e8d9f1478ca46baf57a 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.c +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -21,7 +21,13 @@ unsigned int intel_ring_update_space(struct intel_ring *ring) return space; } -int intel_ring_pin(struct intel_ring *ring) +void __intel_ring_pin(struct intel_ring *ring) +{ + GEM_BUG_ON(!atomic_read(&ring->pin_count)); + atomic_inc(&ring->pin_count); +} + +int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww) { struct i915_vma *vma = ring->vma; unsigned int flags; @@ -39,7 +45,7 @@ int intel_ring_pin(struct intel_ring *ring) else flags |= PIN_HIGH; - ret = i915_ggtt_pin(vma, 0, flags); + ret = i915_ggtt_pin(vma, ww, 0, flags); if (unlikely(ret)) goto err_unpin; diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h index cc0ebca65167f141a1a61211a1838eec2d1a0e9e..1700579bdc93d6b41c9f91386ff9c3196533599e 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.h +++ b/drivers/gpu/drm/i915/gt/intel_ring.h @@ -21,7 +21,8 @@ int intel_ring_cacheline_align(struct i915_request *rq); unsigned int intel_ring_update_space(struct intel_ring *ring); -int intel_ring_pin(struct intel_ring *ring); +void __intel_ring_pin(struct intel_ring *ring); +int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww); void intel_ring_unpin(struct intel_ring *ring); void intel_ring_reset(struct intel_ring *ring, u32 tail); diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 94915f668715d754ead2fca46900ae3483b78ff0..16b48e72c36910e846a8239dbaf8ee9ef8ef965a 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -32,6 +32,7 @@ #include "gen6_ppgtt.h" #include "gen7_renderclear.h" #include "i915_drv.h" +#include "intel_breadcrumbs.h" #include "intel_context.h" #include "intel_gt.h" #include "intel_reset.h" @@ -100,7 +101,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset) */
default: GEM_BUG_ON(engine->id); - /* fallthrough */ + fallthrough; case RCS0: hwsp = RENDER_HWS_PGA_GEN7; break; @@ -201,16 +202,18 @@ static struct i915_address_space *vm_alias(struct i915_address_space *vm) return vm; } +static u32 pp_dir(struct i915_address_space *vm) +{ + return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir; +} + static void set_pp_dir(struct intel_engine_cs *engine) { struct i915_address_space *vm = vm_alias(engine->gt->vm); if (vm) { - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G); - ENGINE_WRITE(engine, RING_PP_DIR_BASE, - px_base(ppgtt->pd)->ggtt_offset << 10); + ENGINE_WRITE(engine, RING_PP_DIR_BASE, pp_dir(vm)); } } @@ -255,7 +258,7 @@ static int xcs_resume(struct intel_engine_cs *engine) else ring_setup_status_page(engine); - intel_engine_reset_breadcrumbs(engine); + intel_breadcrumbs_reset(engine->breadcrumbs); /* Enforce ordering by reading HEAD register back */ ENGINE_POSTING_READ(engine, RING_HEAD); @@ -474,14 +477,16 @@ static void ring_context_destroy(struct kref *ref) intel_context_free(ce); } -static int __context_pin_ppgtt(struct intel_context *ce) +static int ring_context_pre_pin(struct intel_context *ce, + struct i915_gem_ww_ctx *ww, + void **unused) { struct i915_address_space *vm; int err = 0; vm = vm_alias(ce->vm); if (vm) - err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm))); + err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)), ww); return err; } @@ -496,6 +501,10 @@ static void __context_unpin_ppgtt(struct intel_context *ce) } static void ring_context_unpin(struct intel_context *ce) +{ +} + +static void ring_context_post_unpin(struct intel_context *ce) { __context_unpin_ppgtt(ce); } @@ -584,9 +593,9 @@ static int ring_context_alloc(struct intel_context *ce) return 0; } -static int ring_context_pin(struct intel_context *ce) +static int ring_context_pin(struct intel_context *ce, void *unused) { - return __context_pin_ppgtt(ce); + return 0; } static void ring_context_reset(struct intel_context *ce) @@ -597,8 +606,10 @@ static void ring_context_reset(struct intel_context *ce) static const struct intel_context_ops ring_context_ops = { .alloc = ring_context_alloc, + .pre_pin = ring_context_pre_pin, .pin = ring_context_pin, .unpin = ring_context_unpin, + .post_unpin = ring_context_post_unpin, .enter = intel_context_enter_engine, .exit = intel_context_exit_engine, @@ -608,7 +619,7 @@ static const struct intel_context_ops ring_context_ops = { }; static int load_pd_dir(struct i915_request *rq, - const struct i915_ppgtt *ppgtt, + struct i915_address_space *vm, u32 valid) { const struct intel_engine_cs * const engine = rq->engine; @@ -624,7 +635,7 @@ static int load_pd_dir(struct i915_request *rq, *cs++ = MI_LOAD_REGISTER_IMM(1); *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); - *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10; + *cs++ = pp_dir(vm); /* Stall until the page table load is complete? */ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; @@ -826,7 +837,7 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm) * post-sync op, this extra pass appears vital before a * mm switch! 
*/ - ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G); + ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G); if (ret) return ret; @@ -1250,14 +1261,15 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine) return -ENODEV; } - timeline = intel_timeline_create(engine->gt, engine->status_page.vma); + timeline = intel_timeline_create_from_engine(engine, + I915_GEM_HWS_SEQNO_ADDR); if (IS_ERR(timeline)) { err = PTR_ERR(timeline); goto err; } GEM_BUG_ON(timeline->has_initial_breadcrumb); - err = intel_timeline_pin(timeline); + err = intel_timeline_pin(timeline, NULL); if (err) goto err_timeline; @@ -1267,7 +1279,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine) goto err_timeline_unpin; } - err = intel_ring_pin(ring); + err = intel_ring_pin(ring, NULL); if (err) goto err_ring; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 97ba14ad52e4b390302581fafb7ef940e099eda2..e6a00eea063169e0c396f793ed5e585052d0237f 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -7,6 +7,7 @@ #include <drm/i915_drm.h> #include "i915_drv.h" +#include "intel_breadcrumbs.h" #include "intel_gt.h" #include "intel_gt_clock_utils.h" #include "intel_gt_irq.h" diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 46d20f5f3ddcc93ff898d23cd3f779cff093d23d..a2f74cefe4c3fa59ed74ddb1bf284aabe0b7fedf 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -215,7 +215,8 @@ static void cacheline_free(struct intel_timeline_cacheline *cl) static int intel_timeline_init(struct intel_timeline *timeline, struct intel_gt *gt, - struct i915_vma *hwsp) + struct i915_vma *hwsp, + unsigned int offset) { void *vaddr; @@ -246,8 +247,7 @@ static int intel_timeline_init(struct intel_timeline *timeline, vaddr = page_mask_bits(cl->vaddr); } else { - timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR; - + timeline->hwsp_offset = offset; vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); @@ -297,7 +297,9 @@ static void intel_timeline_fini(struct intel_timeline *timeline) } struct intel_timeline * -intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp) +__intel_timeline_create(struct intel_gt *gt, + struct i915_vma *global_hwsp, + unsigned int offset) { struct intel_timeline *timeline; int err; @@ -306,7 +308,7 @@ intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp) if (!timeline) return ERR_PTR(-ENOMEM); - err = intel_timeline_init(timeline, gt, global_hwsp); + err = intel_timeline_init(timeline, gt, global_hwsp, offset); if (err) { kfree(timeline); return ERR_PTR(err); @@ -315,14 +317,20 @@ intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp) return timeline; } -int intel_timeline_pin(struct intel_timeline *tl) +void __intel_timeline_pin(struct intel_timeline *tl) +{ + GEM_BUG_ON(!atomic_read(&tl->pin_count)); + atomic_inc(&tl->pin_count); +} + +int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww) { int err; if (atomic_add_unless(&tl->pin_count, 1, 0)) return 0; - err = i915_ggtt_pin(tl->hwsp_ggtt, 0, PIN_HIGH); + err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH); if (err) return err; @@ -465,7 +473,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl, goto err_rollback; } - err = i915_ggtt_pin(vma, 0, PIN_HIGH); + err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH); if (err) { __idle_hwsp_free(vma->private, cacheline); goto
err_rollback; @@ -484,7 +492,9 @@ __intel_timeline_get_seqno(struct intel_timeline *tl, * free it after the current request is retired, which ensures that * all writes into the cacheline from previous requests are complete. */ - err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence); + err = i915_active_ref(&tl->hwsp_cacheline->active, + tl->fence_context, + &rq->fence); if (err) goto err_cacheline; diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h index 4298b9ac7327eff83a7569a9559239753d0daff2..9882cd911d8edf6e4a383f269459e5fd4e3cb7f2 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.h +++ b/drivers/gpu/drm/i915/gt/intel_timeline.h @@ -29,10 +29,27 @@ #include "i915_active.h" #include "i915_syncmap.h" -#include "gt/intel_timeline_types.h" +#include "intel_timeline_types.h" struct intel_timeline * -intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp); +__intel_timeline_create(struct intel_gt *gt, + struct i915_vma *global_hwsp, + unsigned int offset); + +static inline struct intel_timeline * +intel_timeline_create(struct intel_gt *gt) +{ + return __intel_timeline_create(gt, NULL, 0); +} + +static inline struct intel_timeline * +intel_timeline_create_from_engine(struct intel_engine_cs *engine, + unsigned int offset) +{ + return __intel_timeline_create(engine->gt, + engine->status_page.vma, + offset); +} static inline struct intel_timeline * intel_timeline_get(struct intel_timeline *timeline) @@ -71,7 +88,8 @@ static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl, return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno); } -int intel_timeline_pin(struct intel_timeline *tl); +void __intel_timeline_pin(struct intel_timeline *tl); +int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww); void intel_timeline_enter(struct intel_timeline *tl); int intel_timeline_get_seqno(struct intel_timeline *tl, struct i915_request *rq, diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 5726cd0a37e048d1f5a97804d739242cd6bd249a..a3f72b75c61e70410d0d59466d824b8a218132a1 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -52,6 +52,24 @@ * - Public functions to init or apply the given workaround type. */ +/* + * KBL revision ID ordering is bizarre; higher revision IDs map to lower + * steppings in some cases. So rather than test against the revision ID + * directly, let's map that into our own range of increasing IDs that we + * can test against in a regular manner.
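As a sketch of how the table that follows is meant to be consumed (this mirrors the IS_KBL_GT_REVID macro added to i915_drv.h later in this diff): the raw PCI revision indexes kbl_revids[], and range checks are then done on the monotonically increasing stepping values. Note from the table that PCI revision 5 is GT stepping C0 but display stepping only B1, which is why GT and display checks must read different columns.

	/* Sketch only: range test on the remapped GT stepping. */
	static bool kbl_gt_stepping_in_range(struct drm_i915_private *i915,
					     u8 since, u8 until)
	{
		u8 gt;

		if (!IS_KABYLAKE(i915))
			return false;

		gt = kbl_revids[INTEL_REVID(i915)].gt_stepping;
		return gt >= since && gt <= until;
	}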
+ */ + +const struct i915_rev_steppings kbl_revids[] = { + [0] = { .gt_stepping = KBL_REVID_A0, .disp_stepping = KBL_REVID_A0 }, + [1] = { .gt_stepping = KBL_REVID_B0, .disp_stepping = KBL_REVID_B0 }, + [2] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B0 }, + [3] = { .gt_stepping = KBL_REVID_D0, .disp_stepping = KBL_REVID_B0 }, + [4] = { .gt_stepping = KBL_REVID_F0, .disp_stepping = KBL_REVID_C0 }, + [5] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B1 }, + [6] = { .gt_stepping = KBL_REVID_D1, .disp_stepping = KBL_REVID_B1 }, + [7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 }, +}; + static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name) { wal->name = name; @@ -470,7 +488,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine, gen9_ctx_workarounds_init(engine, wal); /* WaToEnableHwFixForPushConstHWBug:kbl */ - if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER)) + if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); @@ -596,8 +614,8 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU); } -static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, - struct i915_wa_list *wal) +static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) { /* * Wa_1409142259:tgl @@ -607,12 +625,28 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, * Wa_1409207793:tgl * Wa_1409178076:tgl * Wa_1408979724:tgl + * Wa_14010443199:rkl + * Wa_14010698770:rkl */ WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, GEN12_DISABLE_CPS_AWARE_COLOR_PIPE); + /* WaDisableGPGPUMidThreadPreemption:gen12 */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL); +} + +static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + gen12_ctx_workarounds_init(engine, wal); + /* - * Wa_1604555607:gen12 and Wa_1608008084:gen12 + * Wa_1604555607:tgl,rkl + * + * Note that the implementation of this workaround is further modified + * according to the FF_MODE2 guidance given by Wa_1608008084:gen12. * FF_MODE2 register will return the wrong value when read. The default * value for this register is zero for all fields and there are no bit * masks. 
So instead of doing a RMW we should just write the GS Timer @@ -623,11 +657,6 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, FF_MODE2_GS_TIMER_MASK | FF_MODE2_TDS_TIMER_MASK, FF_MODE2_GS_TIMER_224 | FF_MODE2_TDS_TIMER_128, 0); - - /* WaDisableGPGPUMidThreadPreemption:tgl */ - WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, - GEN9_PREEMPT_GPGPU_LEVEL_MASK, - GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL); } static void @@ -642,8 +671,10 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, wa_init_start(wal, name, engine->name); - if (IS_GEN(i915, 12)) + if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) tgl_ctx_workarounds_init(engine, wal); + else if (IS_GEN(i915, 12)) + gen12_ctx_workarounds_init(engine, wal); else if (IS_GEN(i915, 11)) icl_ctx_workarounds_init(engine, wal); else if (IS_CANNONLAKE(i915)) @@ -995,7 +1026,7 @@ kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) gen9_gt_workarounds_init(i915, wal); /* WaDisableDynamicCreditSharing:kbl */ - if (IS_KBL_REVID(i915, 0, KBL_REVID_B0)) + if (IS_KBL_GT_REVID(i915, 0, KBL_REVID_B0)) wa_write_or(wal, GAMT_CHKN_BIT_REG, GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); @@ -1176,9 +1207,16 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) } static void -tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +gen12_gt_workarounds_init(struct drm_i915_private *i915, + struct i915_wa_list *wal) { wa_init_mcr(i915, wal); +} + +static void +tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen12_gt_workarounds_init(i915, wal); /* Wa_1409420604:tgl */ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) @@ -1196,8 +1234,10 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) static void gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal) { - if (IS_GEN(i915, 12)) + if (IS_TIGERLAKE(i915)) tgl_gt_workarounds_init(i915, wal); + else if (IS_GEN(i915, 12)) + gen12_gt_workarounds_init(i915, wal); else if (IS_GEN(i915, 11)) icl_gt_workarounds_init(i915, wal); else if (IS_CANNONLAKE(i915)) @@ -1629,18 +1669,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN9_CTX_PREEMPT_REG, GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); - /* - * Wa_1607030317:tgl - * Wa_1607186500:tgl - * Wa_1607297627:tgl there is 3 entries for this WA on BSpec, 2 - * of then says it is fixed on B0 the other one says it is - * permanent - */ - wa_masked_en(wal, - GEN6_RC_SLEEP_PSMI_CONTROL, - GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE | - GEN8_RC_SEMA_IDLE_MSG_DISABLE); - /* * Wa_1606679103:tgl * (see also Wa_1606682166:icl) @@ -1654,22 +1682,17 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) VSUNIT_CLKGATE_DIS_TGL); } - if (IS_TIGERLAKE(i915)) { - /* Wa_1606931601:tgl */ + if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { + /* Wa_1606931601:tgl,rkl */ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ); - /* Wa_1409804808:tgl */ + /* Wa_1409804808:tgl,rkl */ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_PUSH_CONST_DEREF_HOLD_DIS); - /* Wa_1606700617:tgl */ - wa_masked_en(wal, - GEN9_CS_DEBUG_MODE1, - FF_DOP_CLOCK_GATE_DISABLE); - /* * Wa_1409085225:tgl - * Wa_14010229206:tgl + * Wa_14010229206:tgl,rkl */ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); @@ -1677,9 +1700,29 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) * Wa_1407928979:tgl A* * Wa_18011464164:tgl B0+ * Wa_22010931296:tgl B0+ + * 
Wa_14010919138:rkl */ wa_write_or(wal, GEN7_FF_THREAD_MODE, GEN12_FF_TESSELATION_DOP_GATE_DISABLE); + + /* + * Wa_1607030317:tgl + * Wa_1607186500:tgl + * Wa_1607297627:tgl,rkl there are multiple entries for this + * WA in the BSpec; some indicate this is an A0-only WA, + * others indicate it applies to all steppings. + */ + wa_masked_en(wal, + GEN6_RC_SLEEP_PSMI_CONTROL, + GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE | + GEN8_RC_SEMA_IDLE_MSG_DISABLE); + } + + if (IS_TIGERLAKE(i915)) { + /* Wa_1606700617:tgl */ + wa_masked_en(wal, + GEN9_CS_DEBUG_MODE1, + FF_DOP_CLOCK_GATE_DISABLE); } if (IS_GEN(i915, 11)) { @@ -1898,7 +1941,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) struct drm_i915_private *i915 = engine->i915; /* WaKBLVECSSemaphoreWaitPoll:kbl */ - if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) { + if (IS_KBL_GT_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) { wa_write(wal, RING_SEMA_WAIT_POLL(engine->mmio_base), 1); @@ -2045,6 +2088,7 @@ static int engine_wa_list_verify(struct intel_context *ce, const struct i915_wa *wa; struct i915_request *rq; struct i915_vma *vma; + struct i915_gem_ww_ctx ww; unsigned int i; u32 *results; int err; @@ -2057,29 +2101,34 @@ static int engine_wa_list_verify(struct intel_context *ce, return PTR_ERR(vma); intel_engine_pm_get(ce->engine); - rq = intel_context_create_request(ce); - intel_engine_pm_put(ce->engine); + i915_gem_ww_ctx_init(&ww, false); +retry: + err = i915_gem_object_lock(vma->obj, &ww); + if (err == 0) + err = intel_context_pin_ww(ce, &ww); + if (err) + goto err_pm; + + rq = i915_request_create(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto err_vma; + goto err_unpin; } - i915_vma_lock(vma); err = i915_request_await_object(rq, vma->obj, true); if (err == 0) err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - i915_vma_unlock(vma); - if (err) { - i915_request_add(rq); - goto err_vma; - } - - err = wa_list_srm(rq, wal, vma); - if (err) - goto err_vma; + if (err == 0) + err = wa_list_srm(rq, wal, vma); i915_request_get(rq); + if (err) + i915_request_set_error_once(rq, err); i915_request_add(rq); + + if (err) + goto err_rq; + if (i915_request_wait(rq, 0, HZ / 5) < 0) { err = -ETIME; goto err_rq; @@ -2104,7 +2153,16 @@ static int engine_wa_list_verify(struct intel_context *ce, err_rq: i915_request_put(rq); -err_vma: +err_unpin: + intel_context_unpin(ce); +err_pm: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + intel_engine_pm_put(ce->engine); i915_vma_unpin(vma); i915_vma_put(vma); return err; diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index b8dd3cbc8696bdb1f202efa7e3e2547828341b83..dfd1cfb8a7ec5b5e315ad7e8337cdf0b773fba2a 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -131,6 +131,10 @@ static void mock_context_unpin(struct intel_context *ce) { } +static void mock_context_post_unpin(struct intel_context *ce) +{ +} + static void mock_context_destroy(struct kref *ref) { struct intel_context *ce = container_of(ref, typeof(*ce), ref); @@ -152,8 +156,7 @@ static int mock_context_alloc(struct intel_context *ce) if (!ce->ring) return -ENOMEM; - GEM_BUG_ON(ce->timeline); - ce->timeline = intel_timeline_create(ce->engine->gt, NULL); + ce->timeline = intel_timeline_create(ce->engine->gt); if (IS_ERR(ce->timeline)) { kfree(ce->engine); return PTR_ERR(ce->timeline); @@ -164,7 +167,13 @@ static int mock_context_alloc(struct intel_context *ce) 
return 0; } -static int mock_context_pin(struct intel_context *ce) +static int mock_context_pre_pin(struct intel_context *ce, + struct i915_gem_ww_ctx *ww, void **unused) +{ + return 0; +} + +static int mock_context_pin(struct intel_context *ce, void *unused) { return 0; } @@ -176,8 +185,10 @@ static void mock_context_reset(struct intel_context *ce) static const struct intel_context_ops mock_context_ops = { .alloc = mock_context_alloc, + .pre_pin = mock_context_pre_pin, .pin = mock_context_pin, .unpin = mock_context_unpin, + .post_unpin = mock_context_post_unpin, .enter = intel_context_enter_engine, .exit = intel_context_exit_engine, @@ -261,11 +272,12 @@ static void mock_engine_release(struct intel_engine_cs *engine) GEM_BUG_ON(timer_pending(&mock->hw_delay)); + intel_breadcrumbs_free(engine->breadcrumbs); + intel_context_unpin(engine->kernel_context); intel_context_put(engine->kernel_context); intel_engine_fini_retire(engine); - intel_engine_fini_breadcrumbs(engine); } struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, @@ -323,20 +335,26 @@ int mock_engine_init(struct intel_engine_cs *engine) struct intel_context *ce; intel_engine_init_active(engine, ENGINE_MOCK); - intel_engine_init_breadcrumbs(engine); intel_engine_init_execlists(engine); intel_engine_init__pm(engine); intel_engine_init_retire(engine); + engine->breadcrumbs = intel_breadcrumbs_create(NULL); + if (!engine->breadcrumbs) + return -ENOMEM; + ce = create_kernel_context(engine); if (IS_ERR(ce)) goto err_breadcrumbs; + /* We insist the kernel context is using the status_page */ + engine->status_page.vma = ce->timeline->hwsp_ggtt; + engine->kernel_context = ce; return 0; err_breadcrumbs: - intel_engine_fini_breadcrumbs(engine); + intel_breadcrumbs_free(engine->breadcrumbs); return -ENOMEM; } diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c index 52af1cee9a94eb4845766538d2598537d9dd7120..1f4020e906a8aecf64503f97da76176cc8a6be07 100644 --- a/drivers/gpu/drm/i915/gt/selftest_context.c +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -68,6 +68,8 @@ static int context_sync(struct intel_context *ce) } while (!err); mutex_unlock(&tl->mutex); + /* Wait for all barriers to complete (remote CPU) before we check */ + i915_active_unlock_wait(&ce->active); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c index 73243ba59c7d2309b3f1dcb5133b4916eba4ddca..e73854dd2fe0d69939e577bc3d42a6891099d4a1 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -47,7 +47,10 @@ static int pulse_active(struct i915_active *active) static void pulse_free(struct kref *kref) { - kfree(container_of(kref, struct pulse, kref)); + struct pulse *p = container_of(kref, typeof(*p), kref); + + i915_active_fini(&p->active); + kfree(p); } static void pulse_put(struct pulse *p) diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 3fc5de961280ec6e37da92e128558c42b6c6570a..95d41c01d0e04221f6f22733d3f1d5a65e2107a9 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -2729,7 +2729,7 @@ static int create_gang(struct intel_engine_cs *engine, i915_gem_object_put(obj); intel_context_put(ce); - rq->client_link.next = &(*prev)->client_link; + rq->mock.link.next = &(*prev)->mock.link; *prev = rq; return 0; @@ -2970,8 +2970,7 @@ static int live_preempt_gang(void *arg) } 
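For orientation: the mock_context_pre_pin/pin stubs above follow the same split that ring_context_ops gained earlier in this diff. Anything that may allocate or take the reservation ww class happens in pre_pin under the caller's acquire context; pin then only consumes what pre_pin produced. A sketch of the ordering the hooks imply (the real driver entry point, intel_context_pin_ww, also handles refcounting and locking, none of which is shown):

	/* Sketch only: how the split hooks are expected to be driven. */
	static int pin_context_sketch(struct intel_context *ce,
				      struct i915_gem_ww_ctx *ww)
	{
		void *cookie = NULL;
		int err;

		err = ce->ops->pre_pin(ce, ww, &cookie);	/* may sleep, may -EDEADLK */
		if (err)
			return err;

		err = ce->ops->pin(ce, cookie);		/* no blocking allocations */
		if (err)
			ce->ops->post_unpin(ce);	/* undo pre_pin on failure */

		return err;
	}

	/* Teardown mirrors it: ->unpin(ce), then ->post_unpin(ce). */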
while (rq) { /* wait for each rq from highest to lowest prio */ - struct i915_request *n = - list_next_entry(rq, client_link); + struct i915_request *n = list_next_entry(rq, mock.link); if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) { struct drm_printer p = @@ -3090,7 +3089,7 @@ static struct i915_vma *create_global(struct intel_gt *gt, size_t sz) return vma; } - err = i915_ggtt_pin(vma, 0, 0); + err = i915_ggtt_pin(vma, NULL, 0, 0); if (err) { i915_vma_put(vma); return ERR_PTR(err); @@ -4997,6 +4996,7 @@ static int __live_lrc_state(struct intel_engine_cs *engine, { struct intel_context *ce; struct i915_request *rq; + struct i915_gem_ww_ctx ww; enum { RING_START_IDX = 0, RING_TAIL_IDX, @@ -5011,7 +5011,11 @@ static int __live_lrc_state(struct intel_engine_cs *engine, if (IS_ERR(ce)) return PTR_ERR(ce); - err = intel_context_pin(ce); + i915_gem_ww_ctx_init(&ww, false); +retry: + err = i915_gem_object_lock(scratch->obj, &ww); + if (!err) + err = intel_context_pin_ww(ce, &ww); if (err) goto err_put; @@ -5040,11 +5044,9 @@ static int __live_lrc_state(struct intel_engine_cs *engine, *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32); *cs++ = 0; - i915_vma_lock(scratch); err = i915_request_await_object(rq, scratch->obj, true); if (!err) err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE); - i915_vma_unlock(scratch); i915_request_get(rq); i915_request_add(rq); @@ -5081,6 +5083,12 @@ static int __live_lrc_state(struct intel_engine_cs *engine, err_unpin: intel_context_unpin(ce); err_put: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); intel_context_put(ce); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 8624f5d2a1f3554c8be6cdbb68f101d7b8a74247..3540ba9bd459983b3968971aeda1c2cdb79195ac 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -77,20 +77,20 @@ create_spin_counter(struct intel_engine_cs *engine, vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { - i915_gem_object_put(obj); - return vma; + err = PTR_ERR(vma); + goto err_put; } err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) { - i915_vma_put(vma); - return ERR_PTR(err); - } + if (err) + goto err_unlock; + + i915_vma_lock(vma); base = i915_gem_object_pin_map(obj, I915_MAP_WC); if (IS_ERR(base)) { - i915_gem_object_put(obj); - return ERR_CAST(base); + err = PTR_ERR(base); + goto err_unpin; } cs = base; @@ -134,6 +134,14 @@ create_spin_counter(struct intel_engine_cs *engine, *cancel = base + loop; *counter = srm ? 
memset32(base + end, 0, 1) : NULL; return vma; + +err_unpin: + i915_vma_unpin(vma); +err_unlock: + i915_vma_unlock(vma); +err_put: + i915_gem_object_put(obj); + return ERR_PTR(err); } static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms) @@ -639,7 +647,6 @@ int live_rps_frequency_cs(void *arg) goto err_vma; } - i915_vma_lock(vma); err = i915_request_await_object(rq, vma->obj, false); if (!err) err = i915_vma_move_to_active(vma, rq, 0); @@ -647,7 +654,6 @@ int live_rps_frequency_cs(void *arg) err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); - i915_vma_unlock(vma); i915_request_add(rq); if (err) goto err_vma; @@ -700,7 +706,7 @@ int live_rps_frequency_cs(void *arg) f = act; /* may skip ahead [pcu granularity] */ } - err = -EINVAL; + err = -EINTR; /* ignore error, continue on with test */ } err_vma: @@ -708,6 +714,7 @@ int live_rps_frequency_cs(void *arg) i915_gem_object_flush_map(vma->obj); i915_gem_object_unpin_map(vma->obj); i915_vma_unpin(vma); + i915_vma_unlock(vma); i915_vma_put(vma); st_engine_heartbeat_enable(engine); @@ -781,7 +788,6 @@ int live_rps_frequency_srm(void *arg) goto err_vma; } - i915_vma_lock(vma); err = i915_request_await_object(rq, vma->obj, false); if (!err) err = i915_vma_move_to_active(vma, rq, 0); @@ -789,7 +795,6 @@ int live_rps_frequency_srm(void *arg) err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); - i915_vma_unlock(vma); i915_request_add(rq); if (err) goto err_vma; @@ -841,7 +846,7 @@ int live_rps_frequency_srm(void *arg) f = act; /* may skip ahead [pcu granularity] */ } - err = -EINVAL; + err = -EINTR; /* ignore error, continue on with test */ } err_vma: @@ -849,6 +854,7 @@ int live_rps_frequency_srm(void *arg) i915_gem_object_flush_map(vma->obj); i915_gem_object_unpin_map(vma->obj); i915_vma_unpin(vma); + i915_vma_unlock(vma); i915_vma_put(vma); st_engine_heartbeat_enable(engine); diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index fb5b7d3498a677d6bf7302d9cf49611c192e1527..96d164a3841dcfe3cd013245c7b3e3babd88afe5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -72,7 +72,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state, unsigned long cacheline; int err; - tl = intel_timeline_create(state->gt, NULL); + tl = intel_timeline_create(state->gt); if (IS_ERR(tl)) return PTR_ERR(tl); @@ -455,7 +455,7 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value) struct i915_request *rq; int err; - err = intel_timeline_pin(tl); + err = intel_timeline_pin(tl, NULL); if (err) { rq = ERR_PTR(err); goto out; @@ -487,11 +487,11 @@ checked_intel_timeline_create(struct intel_gt *gt) { struct intel_timeline *tl; - tl = intel_timeline_create(gt, NULL); + tl = intel_timeline_create(gt); if (IS_ERR(tl)) return tl; - if (*tl->hwsp_seqno != tl->seqno) { + if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) { pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n", *tl->hwsp_seqno, tl->seqno); intel_timeline_put(tl); @@ -561,9 +561,9 @@ static int live_hwsp_engine(void *arg) for (n = 0; n < count; n++) { struct intel_timeline *tl = timelines[n]; - if (!err && *tl->hwsp_seqno != n) { - pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n", - n, tl->hwsp_offset, *tl->hwsp_seqno); + if (!err && READ_ONCE(*tl->hwsp_seqno) != n) { + GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n", + n, tl->fence_context, tl->hwsp_offset, 
*tl->hwsp_seqno); GEM_TRACE_DUMP(); err = -EINVAL; } @@ -633,9 +633,9 @@ static int live_hwsp_alternate(void *arg) for (n = 0; n < count; n++) { struct intel_timeline *tl = timelines[n]; - if (!err && *tl->hwsp_seqno != n) { - pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n", - n, tl->hwsp_offset, *tl->hwsp_seqno); + if (!err && READ_ONCE(*tl->hwsp_seqno) != n) { + GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n", + n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno); GEM_TRACE_DUMP(); err = -EINVAL; } @@ -660,14 +660,14 @@ static int live_hwsp_wrap(void *arg) * foreign GPU references. */ - tl = intel_timeline_create(gt, NULL); + tl = intel_timeline_create(gt); if (IS_ERR(tl)) return PTR_ERR(tl); if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) goto out_free; - err = intel_timeline_pin(tl); + err = intel_timeline_pin(tl, NULL); if (err) goto out_free; @@ -733,7 +733,8 @@ static int live_hwsp_wrap(void *arg) goto out; } - if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) { + if (READ_ONCE(*hwsp_seqno[0]) != seqno[0] || + READ_ONCE(*hwsp_seqno[1]) != seqno[1]) { pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n", *hwsp_seqno[0], *hwsp_seqno[1], seqno[0], seqno[1]); @@ -966,9 +967,10 @@ static int live_hwsp_recycle(void *arg) break; } - if (*tl->hwsp_seqno != count) { - pr_err("Invalid seqno stored in timeline %lu @ tl->hwsp_offset, found 0x%x\n", - count, *tl->hwsp_seqno); + if (READ_ONCE(*tl->hwsp_seqno) != count) { + GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x found 0x%x\n", + count, tl->fence_context, + tl->hwsp_offset, *tl->hwsp_seqno); GEM_TRACE_DUMP(); err = -EINVAL; } diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index febc9e6692bac394b089766c4edfdda43621773f..61a0532d0f3d37a50ec355aac21888e2b6969e1e 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -214,7 +214,7 @@ static int check_whitelist(struct i915_gem_context *ctx, return PTR_ERR(results); err = 0; - i915_gem_object_lock(results); + i915_gem_object_lock(results, NULL); intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */ err = i915_gem_object_set_to_cpu_domain(results, false); i915_gem_object_unlock(results); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 861657897c0f99f038e305a237cab592c3bebb85..942c7c187adbaaf1d4c5601411f09567b62620d9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -677,7 +677,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) goto err; flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); - ret = i915_ggtt_pin(vma, 0, flags); + ret = i915_ggtt_pin(vma, NULL, 0, flags); if (ret) { vma = ERR_PTR(ret); goto err; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 59b27aba15c6f0fd54efb11be05777d814bb18bf..80e8b6c3bc8c5adf4da55fc2525b4522e5e08279 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -51,8 +51,8 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, * Note that RKL uses the same firmware as TGL. 
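A note on the selftest changes above: the HWSP seqno is a dword the GPU writes behind the CPU's back, so the tests now load it through READ_ONCE() to force a fresh, single access instead of letting the compiler reuse an earlier read across the wait. The idiom in isolation (illustrative names):

	/* Poll a GPU-written status dword without compiler caching. */
	static bool hwsp_seqno_reached(const u32 *hwsp_seqno, u32 expected)
	{
		return READ_ONCE(*hwsp_seqno) == expected;
	}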
*/ #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ - fw_def(ROCKETLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \ - fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \ + fw_def(ROCKETLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 5, 0)) \ + fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 5, 0)) \ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \ fw_def(COMETLAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \ diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 072725a448db228652bfcf865da73bbebab98fa1..ad86c5eb5bba0875c7a57bbc6b23818b56a6ade1 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -70,6 +70,7 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off, { u8 *cfg_base = vgpu_cfg_space(vgpu); u8 mask, new, old; + pci_power_t pwr; int i = 0; for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) { @@ -91,6 +92,15 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off, /* For other configuration space directly copy as it is. */ if (i < bytes) memcpy(cfg_base + off + i, src + i, bytes - i); + + if (off == vgpu->cfg_space.pmcsr_off && vgpu->cfg_space.pmcsr_off) { + pwr = (pci_power_t __force)(*(u16*)(&vgpu_cfg_space(vgpu)[off]) + & PCI_PM_CTRL_STATE_MASK); + if (pwr == PCI_D3hot) + vgpu->d3_entered = true; + gvt_dbg_core("vgpu-%d power status changed to %d\n", + vgpu->id, pwr); + } } /** @@ -366,6 +376,7 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; u16 *gmch_ctl; + u8 next; memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, info->cfg_space_size); @@ -401,6 +412,19 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, pci_resource_len(gvt->gt->i915->drm.pdev, 2); memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4); + + /* PM Support */ + vgpu->cfg_space.pmcsr_off = 0; + if (vgpu_cfg_space(vgpu)[PCI_STATUS] & PCI_STATUS_CAP_LIST) { + next = vgpu_cfg_space(vgpu)[PCI_CAPABILITY_LIST]; + do { + if (vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM) { + vgpu->cfg_space.pmcsr_off = next + PCI_PM_CTRL; + break; + } + next = vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_NEXT]; + } while (next); + } } /** diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index f1940939260aaa401f5dba6174d463203d9a179c..d0a599b51bfedfacbb0b9061f3a68c8546726f24 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1923,6 +1923,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) if (ret) goto err_unmap; + i915_gem_object_unlock(bb->obj); INIT_LIST_HEAD(&bb->list); list_add(&bb->list, &s->workload->shadow_bb); @@ -2982,7 +2983,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) goto put_obj; } - i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); ret = i915_gem_object_set_to_cpu_domain(obj, false); i915_gem_object_unlock(obj); if (ret) { diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 210016192ce704036608a04d05ceaa79a7548d09..a3a4305eda01b62dc370bd5fd63ee834ee56570e 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2501,7 +2501,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) return 
create_scratch_page_tree(vgpu); } -static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu) +void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu) { struct list_head *pos, *n; struct intel_vgpu_mm *mm; diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 320b8d6ad92fa4e0f7ba6bd0f1d34c0435cfc44c..52d0d88abd86a02f06cc8098e6fa832297715c9a 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -279,4 +279,6 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes); +void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu); + #endif /* _GVT_GTT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index a4a6db6b7f9087c96be422b73a2e9f2f542ca6db..ff7f2515a6fe6d0185694cd74dc50d4e6338b1e5 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -106,6 +106,7 @@ struct intel_vgpu_pci_bar { struct intel_vgpu_cfg_space { unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE]; struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM]; + u32 pmcsr_off; }; #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space) @@ -198,6 +199,8 @@ struct intel_vgpu { struct intel_vgpu_submission submission; struct radix_tree_root page_track_tree; u32 hws_pga[I915_NUM_ENGINES]; + /* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */ + bool d3_entered; struct dentry *debugfs; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 63bba7b4bb2fbe00f8d09a3a47da4891bec221fc..05f3bc98d242d38fe813db1d5562555016d64c20 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1226,7 +1226,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) switch (notification) { case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; - /* fall through */ + fallthrough; case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); return PTR_ERR_OR_ZERO(mm); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 3c3b9842bbbdcf9cd7a726c8ce2a650a0018df4b..1570eb8aa97836130e8f3ca703947359991a9c3e 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -403,6 +403,14 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) wa_ctx->indirect_ctx.shadow_va = NULL; } +static void set_dma_address(struct i915_page_directory *pd, dma_addr_t addr) +{ + struct scatterlist *sg = pd->pt.base->mm.pages->sgl; + + /* This is not a good idea */ + sg->dma_address = addr; +} + static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, struct intel_context *ce) { @@ -411,7 +419,7 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, int i = 0; if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { - px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0]; + set_dma_address(ppgtt->pd, mm->ppgtt_mm.shadow_pdps[0]); } else { for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) { struct i915_page_directory * const pd = @@ -421,7 +429,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, shadow ppgtt. 
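Back on the cfg_space.c hunk above: pmcsr_off is discovered with the standard PCI capability-list walk, starting from PCI_CAPABILITY_LIST and chasing PCI_CAP_LIST_NEXT links until the Power Management capability turns up. The same walk in isolation (sketch; cfg stands for the vGPU's virtual config-space image):

	/* Return the PMCSR offset within a config-space image, or 0 if absent. */
	static u32 find_pmcsr_off(const u8 *cfg)
	{
		u8 next;

		if (!(cfg[PCI_STATUS] & PCI_STATUS_CAP_LIST))
			return 0;

		for (next = cfg[PCI_CAPABILITY_LIST]; next;
		     next = cfg[next + PCI_CAP_LIST_NEXT])
			if (cfg[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM)
				return next + PCI_PM_CTRL;

		return 0;
	}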
*/ if (!pd) break; - px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; + + set_dma_address(pd, mm->ppgtt_mm.shadow_pdps[i]); } } } @@ -1240,13 +1249,13 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s, int i; if (i915_vm_is_4lvl(&ppgtt->vm)) { - px_dma(ppgtt->pd) = s->i915_context_pml4; + set_dma_address(ppgtt->pd, s->i915_context_pml4); } else { for (i = 0; i < GEN8_3LVL_PDPES; i++) { struct i915_page_directory * const pd = i915_pd_entry(ppgtt->pd, i); - px_dma(pd) = s->i915_context_pdps[i]; + set_dma_address(pd, s->i915_context_pdps[i]); } } } diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 7d361623ff6792d7866522d5b98073914373cdae..8fa9b31a248401ffc28a0b862827e3da6c4f2fa0 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -257,6 +257,7 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu) intel_gvt_deactivate_vgpu(vgpu); mutex_lock(&vgpu->vgpu_lock); + vgpu->d3_entered = false; intel_vgpu_clean_workloads(vgpu, ALL_ENGINES); intel_vgpu_dmabuf_cleanup(vgpu); mutex_unlock(&vgpu->vgpu_lock); @@ -393,6 +394,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL); idr_init(&vgpu->object_idr); intel_vgpu_init_cfg_space(vgpu, param->primary); + vgpu->d3_entered = false; ret = intel_vgpu_init_mmio(vgpu); if (ret) @@ -557,10 +559,15 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, /* full GPU reset or device model level reset */ if (engine_mask == ALL_ENGINES || dmlr) { intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); - intel_vgpu_invalidate_ppgtt(vgpu); + if (engine_mask == ALL_ENGINES) + intel_vgpu_invalidate_ppgtt(vgpu); /*fence will not be reset during virtual reset */ if (dmlr) { + if (!vgpu->d3_entered) { + intel_vgpu_invalidate_ppgtt(vgpu); + intel_vgpu_destroy_all_ppgtt_mm(vgpu); + } - intel_vgpu_reset_gtt(vgpu); + intel_vgpu_reset_ggtt(vgpu, true); intel_vgpu_reset_resource(vgpu); } @@ -572,7 +579,14 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, intel_vgpu_reset_cfg_space(vgpu); /* only reset the failsafe mode when dmlr reset */ vgpu->failsafe = false; - vgpu->pv_notified = false; + /* + * PCI_D0 is set before dmlr, so reset d3_entered here + * after we are done using it. + */
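The d3_entered flag consulted here is set by the cfg_space.c hunk earlier: the power state is the low two bits of the PMCSR word the guest writes. Reduced to the essentials (sketch):

	/* Does this PMCSR value put the (virtual) function into D3hot? */
	static bool pmcsr_is_d3hot(u16 pmcsr)
	{
		return (pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot;
	}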
+ if (vgpu->d3_entered) + vgpu->d3_entered = false; + else + vgpu->pv_notified = false; } } diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index d960d0be5bd2fb62cae1261f35b51c8174cc76ef..b0a6522be3d1401fd6a6d7fcd422a6f32625dc6b 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -28,12 +28,14 @@ static struct i915_global_active { } global; struct active_node { + struct rb_node node; struct i915_active_fence base; struct i915_active *ref; - struct rb_node node; u64 timeline; }; +#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node) + static inline struct active_node * node_from_active(struct i915_active_fence *active) { @@ -128,8 +130,8 @@ static inline void debug_active_assert(struct i915_active *ref) { } static void __active_retire(struct i915_active *ref) { + struct rb_root root = RB_ROOT; struct active_node *it, *n; - struct rb_root root; unsigned long flags; GEM_BUG_ON(i915_active_is_idle(ref)); @@ -141,9 +143,25 @@ __active_retire(struct i915_active *ref) GEM_BUG_ON(rcu_access_pointer(ref->excl.fence)); debug_active_deactivate(ref); - root = ref->tree; - ref->tree = RB_ROOT; - ref->cache = NULL; + /* Even if we have not used the cache, we may still have a barrier */ + if (!ref->cache) + ref->cache = fetch_node(ref->tree.rb_node); + + /* Keep the MRU cached node for reuse */ + if (ref->cache) { + /* Discard all other nodes in the tree */ + rb_erase(&ref->cache->node, &ref->tree); + root = ref->tree; + + /* Rebuild the tree with only the cached node */ + rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node); + rb_insert_color(&ref->cache->node, &ref->tree); + GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node); + + /* Make the cached node available for reuse with any timeline */ + if (IS_ENABLED(CONFIG_64BIT)) + ref->cache->timeline = 0; /* needs cmpxchg(u64) */ + } spin_unlock_irqrestore(&ref->tree_lock, flags); @@ -154,6 +172,7 @@ __active_retire(struct i915_active *ref) /* ... except if you wait on it, you must manage your own references! */ wake_up_var(ref); + /* Finally free the discarded timeline tree */ rbtree_postorder_for_each_entry_safe(it, n, &root, node) { GEM_BUG_ON(i915_active_fence_isset(&it->base)); kmem_cache_free(global.slab_cache, it); @@ -216,12 +235,11 @@ excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb) active_retire(container_of(cb, struct i915_active, excl.cb)); } -static struct i915_active_fence * -active_instance(struct i915_active *ref, struct intel_timeline *tl) +static struct active_node *__active_lookup(struct i915_active *ref, u64 idx) { - struct active_node *node, *prealloc; - struct rb_node **p, *parent; - u64 idx = tl->fence_context; + struct active_node *it; + + GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */ /* * We track the most recently used timeline to skip a rbtree search @@ -230,8 +248,59 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl) * after the previous activity has been retired, or if it matches the * current timeline. */ - node = READ_ONCE(ref->cache); - if (node && node->timeline == idx) + it = READ_ONCE(ref->cache); + if (it) { + u64 cached = READ_ONCE(it->timeline); + + /* Once claimed, this slot will only belong to this idx */ + if (cached == idx) + return it; + +#ifdef CONFIG_64BIT /* for cmpxchg(u64) */ + /* + * An unclaimed cache [.timeline=0] can only be claimed once. + * + * If the value is already non-zero, some other thread has + * claimed the cache and we know that it does not match our + * idx. If, and only if, the timeline is currently zero is it + * worth competing to claim it atomically for ourselves (for + * only the winner of that race will cmpxchg return the old + * value of 0). + */
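The protocol described in the comment above, boiled down (sketch; slot plays the part of the cached node's timeline field). cmpxchg() returns the value it found, so a return of 0 means we won the 0 -> idx transition; the CONFIG_64BIT guard exists because not every 32-bit architecture provides a 64-bit cmpxchg:

	/* Claim-once: a slot holding 0 is free; a non-zero owner is permanent. */
	static bool claim_slot(u64 *slot, u64 idx)
	{
		u64 cached = READ_ONCE(*slot);

		if (cached == idx)
			return true;			/* already ours */

		return !cached && !cmpxchg(slot, 0, idx); /* won the race from 0 */
	}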
+ if (!cached && !cmpxchg(&it->timeline, 0, idx)) + return it; +#endif + } + + BUILD_BUG_ON(offsetof(typeof(*it), node)); + + /* While active, the tree can only be built; not destroyed */ + GEM_BUG_ON(i915_active_is_idle(ref)); + + it = fetch_node(ref->tree.rb_node); + while (it) { + if (it->timeline < idx) { + it = fetch_node(it->node.rb_right); + } else if (it->timeline > idx) { + it = fetch_node(it->node.rb_left); + } else { + WRITE_ONCE(ref->cache, it); + break; + } + } + + /* NB: If the tree rotated beneath us, we may miss our target. */ + return it; +} + +static struct i915_active_fence * +active_instance(struct i915_active *ref, u64 idx) +{ + struct active_node *node, *prealloc; + struct rb_node **p, *parent; + + node = __active_lookup(ref, idx); + if (likely(node)) return &node->base; /* Preallocate a replacement, just in case */ @@ -268,10 +337,9 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl) rb_insert_color(&node->node, &ref->tree); out: - ref->cache = node; + WRITE_ONCE(ref->cache, node); spin_unlock_irq(&ref->tree_lock); - BUILD_BUG_ON(offsetof(typeof(*node), base)); return &node->base; } @@ -353,69 +421,116 @@ __active_del_barrier(struct i915_active *ref, struct active_node *node) return ____active_del_barrier(ref, node, barrier_to_engine(node)); } -int i915_active_ref(struct i915_active *ref, - struct intel_timeline *tl, - struct dma_fence *fence) +static bool +replace_barrier(struct i915_active *ref, struct i915_active_fence *active) +{ + if (!is_barrier(active)) /* proto-node used by our idle barrier? */ + return false; + + /* + * This request is on the kernel_context timeline, and so + * we can use it to substitute for the pending idle-barrier + * request that we want to emit on the kernel_context. + */ + __active_del_barrier(ref, node_from_active(active)); + return true; +} + +int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence) { struct i915_active_fence *active; int err; - lockdep_assert_held(&tl->mutex); - /* Prevent reaping in case we malloc/wait while building the tree */ err = i915_active_acquire(ref); if (err) return err; - active = active_instance(ref, tl); + active = active_instance(ref, idx); if (!active) { err = -ENOMEM; goto out; } - if (is_barrier(active)) { /* proto-node used by our idle barrier */ - /* - * This request is on the kernel_context timeline, and so - * we can use it to substitute for the pending idle-barrer - * request that we want to emit on the kernel_context.
- */ - __active_del_barrier(ref, node_from_active(active)); + if (replace_barrier(ref, active)) { RCU_INIT_POINTER(active->fence, NULL); atomic_dec(&ref->count); } if (!__i915_active_fence_set(active, fence)) - atomic_inc(&ref->count); + __i915_active_acquire(ref); out: i915_active_release(ref); return err; } -struct dma_fence * -i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f) +static struct dma_fence * +__i915_active_set_fence(struct i915_active *ref, + struct i915_active_fence *active, + struct dma_fence *fence) { struct dma_fence *prev; - /* We expect the caller to manage the exclusive timeline ordering */ - GEM_BUG_ON(i915_active_is_idle(ref)); + if (replace_barrier(ref, active)) { + RCU_INIT_POINTER(active->fence, fence); + return NULL; + } rcu_read_lock(); - prev = __i915_active_fence_set(&ref->excl, f); + prev = __i915_active_fence_set(active, fence); if (prev) prev = dma_fence_get_rcu(prev); else - atomic_inc(&ref->count); + __i915_active_acquire(ref); rcu_read_unlock(); return prev; } +static struct i915_active_fence * +__active_fence(struct i915_active *ref, u64 idx) +{ + struct active_node *it; + + it = __active_lookup(ref, idx); + if (unlikely(!it)) { /* Contention with parallel tree builders! */ + spin_lock_irq(&ref->tree_lock); + it = __active_lookup(ref, idx); + spin_unlock_irq(&ref->tree_lock); + } + GEM_BUG_ON(!it); /* slot must be preallocated */ + + return &it->base; +} + +struct dma_fence * +__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence) +{ + /* Only valid while active, see i915_active_acquire_for_context() */ + return __i915_active_set_fence(ref, __active_fence(ref, idx), fence); +} + +struct dma_fence * +i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f) +{ + /* We expect the caller to manage the exclusive timeline ordering */ + return __i915_active_set_fence(ref, &ref->excl, f); +} + bool i915_active_acquire_if_busy(struct i915_active *ref) { debug_active_assert(ref); return atomic_add_unless(&ref->count, 1, 0); } +static void __i915_active_activate(struct i915_active *ref) +{ + spin_lock_irq(&ref->tree_lock); /* __active_retire() */ + if (!atomic_fetch_inc(&ref->count)) + debug_active_activate(ref); + spin_unlock_irq(&ref->tree_lock); +} + int i915_active_acquire(struct i915_active *ref) { int err; @@ -423,19 +538,19 @@ int i915_active_acquire(struct i915_active *ref) if (i915_active_acquire_if_busy(ref)) return 0; + if (!ref->active) { + __i915_active_activate(ref); + return 0; + } + err = mutex_lock_interruptible(&ref->mutex); if (err) return err; if (likely(!i915_active_acquire_if_busy(ref))) { - if (ref->active) - err = ref->active(ref); - if (!err) { - spin_lock_irq(&ref->tree_lock); /* __active_retire() */ - debug_active_activate(ref); - atomic_inc(&ref->count); - spin_unlock_irq(&ref->tree_lock); - } + err = ref->active(ref); + if (!err) + __i915_active_activate(ref); } mutex_unlock(&ref->mutex); @@ -443,6 +558,24 @@ int i915_active_acquire(struct i915_active *ref) return err; } +int i915_active_acquire_for_context(struct i915_active *ref, u64 idx) +{ + struct i915_active_fence *active; + int err; + + err = i915_active_acquire(ref); + if (err) + return err; + + active = active_instance(ref, idx); + if (!active) { + i915_active_release(ref); + return -ENOMEM; + } + + return 0; /* return with active ref */ +} + void i915_active_release(struct i915_active *ref) { debug_active_assert(ref); @@ -651,16 +784,16 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence, return 
await_active(ref, flags, sw_await_fence, fence, fence); } -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) void i915_active_fini(struct i915_active *ref) { debug_active_fini(ref); GEM_BUG_ON(atomic_read(&ref->count)); GEM_BUG_ON(work_pending(&ref->work)); - GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree)); mutex_destroy(&ref->mutex); + + if (ref->cache) + kmem_cache_free(global.slab_cache, ref->cache); } -#endif static inline bool is_idle_barrier(struct active_node *node, u64 idx) { @@ -674,7 +807,6 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) if (RB_EMPTY_ROOT(&ref->tree)) return NULL; - spin_lock_irq(&ref->tree_lock); GEM_BUG_ON(i915_active_is_idle(ref)); /* @@ -700,9 +832,9 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) prev = p; if (node->timeline < idx) - p = p->rb_right; + p = READ_ONCE(p->rb_right); else - p = p->rb_left; + p = READ_ONCE(p->rb_left); } /* @@ -739,14 +871,13 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) goto match; } - spin_unlock_irq(&ref->tree_lock); - return NULL; match: + spin_lock_irq(&ref->tree_lock); rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */ if (p == &ref->cache->node) - ref->cache = NULL; + WRITE_ONCE(ref->cache, NULL); spin_unlock_irq(&ref->tree_lock); return rb_entry(p, struct active_node, node); @@ -758,7 +889,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, intel_engine_mask_t tmp, mask = engine->mask; struct llist_node *first = NULL, *last = NULL; struct intel_gt *gt = engine->gt; - int err; GEM_BUG_ON(i915_active_is_idle(ref)); @@ -778,13 +908,13 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, struct llist_node *prev = first; struct active_node *node; + rcu_read_lock(); node = reuse_idle_barrier(ref, idx); + rcu_read_unlock(); if (!node) { node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL); - if (!node) { - err = ENOMEM; + if (!node) goto unwind; - } RCU_INIT_POINTER(node->base.fence, NULL); node->base.cb.func = node_retire; @@ -804,7 +934,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, */ RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN)); node->base.cb.node.prev = (void *)engine; - atomic_inc(&ref->count); + __i915_active_acquire(ref); } GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN)); @@ -832,7 +962,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, kmem_cache_free(global.slab_cache, node); } - return err; + return -ENOMEM; } void i915_active_acquire_barrier(struct i915_active *ref) diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index cf40581509667d5f3c44ee22f2cf8728fc37a4a0..fb165d3f01cf378ed8fbcde6181682199fdc68f8 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -163,14 +163,16 @@ void __i915_active_init(struct i915_active *ref, __i915_active_init(ref, active, retire, &__mkey, &__wkey); \ } while (0) -int i915_active_ref(struct i915_active *ref, - struct intel_timeline *tl, - struct dma_fence *fence); +struct dma_fence * +__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence); +int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence); static inline int i915_active_add_request(struct i915_active *ref, struct i915_request *rq) { - return i915_active_ref(ref, i915_request_timeline(rq), &rq->fence); + return i915_active_ref(ref, + i915_request_timeline(rq)->fence_context, + 
&rq->fence); } struct dma_fence * @@ -198,7 +200,9 @@ int i915_request_await_active(struct i915_request *rq, #define I915_ACTIVE_AWAIT_BARRIER BIT(2) int i915_active_acquire(struct i915_active *ref); +int i915_active_acquire_for_context(struct i915_active *ref, u64 idx); bool i915_active_acquire_if_busy(struct i915_active *ref); + void i915_active_release(struct i915_active *ref); static inline void __i915_active_acquire(struct i915_active *ref) @@ -213,11 +217,7 @@ i915_active_is_idle(const struct i915_active *ref) return !atomic_read(&ref->count); } -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) void i915_active_fini(struct i915_active *ref); -#else -static inline void i915_active_fini(struct i915_active *ref) { } -#endif int i915_active_acquire_preallocate_barrier(struct i915_active *ref, struct intel_engine_cs *engine); @@ -231,4 +231,19 @@ struct i915_active *i915_active_create(void); struct i915_active *i915_active_get(struct i915_active *ref); void i915_active_put(struct i915_active *ref); +static inline int __i915_request_await_exclusive(struct i915_request *rq, + struct i915_active *active) +{ + struct dma_fence *fence; + int err = 0; + + fence = i915_active_fence_get(&active->excl); + if (fence) { + err = i915_request_await_dma_fence(rq, fence); + dma_fence_put(fence); + } + + return err; +} + #endif /* _I915_ACTIVE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 372354d33f55210d322422f0e193e319d2ec9522..5ac4a999f05a6ebb58ff9a8092d2a474416f97c7 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1204,6 +1204,12 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, return dst; } +static inline bool cmd_desc_is(const struct drm_i915_cmd_descriptor * const desc, + const u32 cmd) +{ + return desc->cmd.value == (cmd & desc->cmd.mask); +} + static bool check_cmd(const struct intel_engine_cs *engine, const struct drm_i915_cmd_descriptor *desc, const u32 *cmd, u32 length) @@ -1242,19 +1248,19 @@ static bool check_cmd(const struct intel_engine_cs *engine, * allowed mask/value pair given in the whitelist entry. 
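On the cmd_parser.c helper above: descriptors pair a canonical value with a mask, so a bare equality test against an opcode constant only matches when the constant carries no bits outside that mask (length and flag fields usually do). cmd_desc_is() masks before comparing. A worked illustration with made-up numbers:

	/*
	 * Suppose desc->cmd = { .value = 0x18800000, .mask = 0xffff0000 } and
	 * the word under test is 0x18800041 (same opcode, nonzero low bits):
	 *
	 *	desc->cmd.value == 0x18800041			-> false
	 *	desc->cmd.value == (0x18800041 & 0xffff0000)	-> true
	 *
	 * The second form is what cmd_desc_is() computes.
	 */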
*/ if (reg->mask) { - if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { + if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) { DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n", reg_addr); return false; } - if (desc->cmd.value == MI_LOAD_REGISTER_REG) { + if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) { DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n", reg_addr); return false; } - if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && + if (cmd_desc_is(desc, MI_LOAD_REGISTER_IMM(1)) && (offset + 2 > length || (cmd[offset + 1] & reg->mask) != reg->value)) { DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n", @@ -1478,7 +1484,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, break; } - if (desc->cmd.value == MI_BATCH_BUFFER_START) { + if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) { ret = check_bbstart(cmd, offset, length, batch_length, batch_addr, shadow_addr, jump_whitelist); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 5fd5af4bc8551320977b814bc667f75dc1a0488b..00292a849c34cb8cd1e1ee7e5f34f4fb91cb79d8 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -392,7 +392,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) pre |= IS_HSW_EARLY_SDV(dev_priv); pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0); pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST); - pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0); + pre |= IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_A0); pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2); if (pre) { @@ -1075,6 +1075,7 @@ static void i915_driver_release(struct drm_device *dev) intel_memory_regions_driver_release(dev_priv); i915_ggtt_driver_release(dev_priv); + i915_gem_drain_freed_objects(dev_priv); i915_driver_mmio_release(dev_priv); @@ -1119,7 +1120,6 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) struct drm_i915_file_private *file_priv = file->driver_priv; i915_gem_context_close(file); - i915_gem_release(dev, file); kfree_rcu(file_priv, rcu); @@ -1846,7 +1846,8 @@ static struct drm_driver driver = { */ .driver_features = DRIVER_GEM | - DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ, + DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ | + DRIVER_SYNCOBJ_TIMELINE, .release = i915_driver_release, .open = i915_driver_open, .lastclose = i915_driver_lastclose, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e4f7f6518945be52a983b43eba13c7a5238fec96..ab17084af0ff40e98d276bf2802aece074f7744e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -108,8 +108,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20200715" -#define DRIVER_TIMESTAMP 1594811881 +#define DRIVER_DATE "20200824" +#define DRIVER_TIMESTAMP 1598293597 struct drm_i915_gem_object; @@ -203,11 +203,6 @@ struct drm_i915_file_private { struct rcu_head rcu; }; - struct { - spinlock_t lock; - struct list_head request_list; - } mm; - struct xarray context_xa; struct xarray vm_xa; @@ -506,6 +501,7 @@ struct i915_psr { bool link_standby; bool colorimetry_support; bool psr2_enabled; + bool psr2_sel_fetch_enabled; u8 sink_sync_latency; ktime_t last_entry_attempt; ktime_t last_exit; @@ -591,11 +587,6 @@ struct i915_gem_mm { */ atomic_t free_count; - /** - * Small stash of WC pages - */ - struct pagestash wc_stash; - /** * tmpfs instance used for shmem backed objects */ @@ -1044,6 +1035,14 @@ struct drm_i915_private { struct 
intel_l3_parity l3_parity; + /* + * HTI (aka HDPORT) state read during initial hw readout. Most + * platforms don't have HTI, so this will just stay 0. Those that do + * will use this later to figure out which PLLs and PHYs are unavailable + * for driver usage. + */ + u32 hti_state; + /* * edram size in MB. * Cannot be determined by PCIID. You must always read a register. @@ -1489,6 +1488,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_ICL_WITH_PORT_F(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF) +#define IS_TGL_U(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULT) + +#define IS_TGL_Y(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX) + #define SKL_REVID_A0 0x0 #define SKL_REVID_B0 0x1 #define SKL_REVID_C0 0x2 @@ -1509,14 +1514,34 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_BXT_REVID(dev_priv, since, until) \ (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until)) -#define KBL_REVID_A0 0x0 -#define KBL_REVID_B0 0x1 -#define KBL_REVID_C0 0x2 -#define KBL_REVID_D0 0x3 -#define KBL_REVID_E0 0x4 +enum { + KBL_REVID_A0, + KBL_REVID_B0, + KBL_REVID_B1, + KBL_REVID_C0, + KBL_REVID_D0, + KBL_REVID_D1, + KBL_REVID_E0, + KBL_REVID_F0, + KBL_REVID_G0, +}; + +struct i915_rev_steppings { + u8 gt_stepping; + u8 disp_stepping; +}; + +/* Defined in intel_workarounds.c */ +extern const struct i915_rev_steppings kbl_revids[]; -#define IS_KBL_REVID(dev_priv, since, until) \ - (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until)) +#define IS_KBL_GT_REVID(dev_priv, since, until) \ + (IS_KABYLAKE(dev_priv) && \ + kbl_revids[INTEL_REVID(dev_priv)].gt_stepping >= since && \ + kbl_revids[INTEL_REVID(dev_priv)].gt_stepping <= until) +#define IS_KBL_DISP_REVID(dev_priv, since, until) \ + (IS_KABYLAKE(dev_priv) && \ + kbl_revids[INTEL_REVID(dev_priv)].disp_stepping >= since && \ + kbl_revids[INTEL_REVID(dev_priv)].disp_stepping <= until) #define GLK_REVID_A0 0x0 #define GLK_REVID_A1 0x1 @@ -1665,6 +1690,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr) #define HAS_PSR_HW_TRACKING(dev_priv) \ (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking) +#define HAS_PSR2_SEL_FETCH(dev_priv) (INTEL_GEN(dev_priv) >= 12) #define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0) #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) @@ -1790,11 +1816,18 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) } struct i915_vma * __must_check +i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww, + const struct i915_ggtt_view *view, + u64 size, u64 alignment, u64 flags); + +static inline struct i915_vma * __must_check i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view, - u64 size, - u64 alignment, - u64 flags); + u64 size, u64 alignment, u64 flags) +{ + return i915_gem_object_ggtt_pin_ww(obj, NULL, view, size, alignment, flags); +} int i915_gem_object_unbind(struct drm_i915_gem_object *obj, unsigned long flags); @@ -1831,7 +1864,6 @@ void i915_gem_suspend_late(struct drm_i915_private *dev_priv); void i915_gem_resume(struct drm_i915_private *dev_priv); int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); -void i915_gem_release(struct drm_device *dev, struct drm_file *file); int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, enum 
i915_cache_level cache_level); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9aa3066cb75d901e22f733530e424088eb563bce..bb0c12975f38e577309d099544edbd097a7405f7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -335,12 +335,20 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj, u64 remain; int ret; - ret = i915_gem_object_prepare_read(obj, &needs_clflush); + ret = i915_gem_object_lock_interruptible(obj, NULL); if (ret) return ret; + ret = i915_gem_object_prepare_read(obj, &needs_clflush); + if (ret) { + i915_gem_object_unlock(obj); + return ret; + } + fence = i915_gem_object_lock_fence(obj); i915_gem_object_finish_access(obj); + i915_gem_object_unlock(obj); + if (!fence) return -ENOMEM; @@ -420,7 +428,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, GEM_BUG_ON(!drm_mm_node_allocated(&node)); } - ret = i915_gem_object_lock_interruptible(obj); + ret = i915_gem_object_lock_interruptible(obj, NULL); if (ret) goto out_unpin; @@ -619,7 +627,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, GEM_BUG_ON(!drm_mm_node_allocated(&node)); } - ret = i915_gem_object_lock_interruptible(obj); + ret = i915_gem_object_lock_interruptible(obj, NULL); if (ret) goto out_unpin; @@ -734,12 +742,20 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, u64 remain; int ret; - ret = i915_gem_object_prepare_write(obj, &needs_clflush); + ret = i915_gem_object_lock_interruptible(obj, NULL); if (ret) return ret; + ret = i915_gem_object_prepare_write(obj, &needs_clflush); + if (ret) { + i915_gem_object_unlock(obj); + return ret; + } + fence = i915_gem_object_lock_fence(obj); i915_gem_object_finish_access(obj); + i915_gem_object_unlock(obj); + if (!fence) return -ENOMEM; @@ -946,11 +962,10 @@ static void discard_ggtt_vma(struct i915_vma *vma) } struct i915_vma * -i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view, - u64 size, - u64 alignment, - u64 flags) +i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww, + const struct i915_ggtt_view *view, + u64 size, u64 alignment, u64 flags) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_ggtt *ggtt = &i915->ggtt; @@ -1016,7 +1031,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, return ERR_PTR(ret); } - ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); + ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL); if (ret) return ERR_PTR(ret); @@ -1290,7 +1305,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915) i915_gem_drain_freed_objects(i915); list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) { - i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); drm_WARN_ON(&i915->drm, i915_gem_object_set_to_cpu_domain(obj, true)); i915_gem_object_unlock(obj); @@ -1301,21 +1316,6 @@ int i915_gem_freeze_late(struct drm_i915_private *i915) return 0; } -void i915_gem_release(struct drm_device *dev, struct drm_file *file) -{ - struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_request *request; - - /* Clean up our request list when the client is going away, so that - * later retire_requests won't dereference our soon-to-be-gone - * file_priv. 
- */ - spin_lock(&file_priv->mm.lock); - list_for_each_entry(request, &file_priv->mm.request_list, client_link) - request->file_priv = NULL; - spin_unlock(&file_priv->mm.lock); -} - int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) { struct drm_i915_file_private *file_priv; @@ -1331,9 +1331,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) file_priv->dev_priv = i915; file_priv->file = file; - spin_lock_init(&file_priv->mm.lock); - INIT_LIST_HEAD(&file_priv->mm.request_list); - file_priv->bsd_engine = -1; file_priv->hang_timestamp = jiffies; @@ -1344,6 +1341,58 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) return ret; } +void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool intr) +{ + ww_acquire_init(&ww->ctx, &reservation_ww_class); + INIT_LIST_HEAD(&ww->obj_list); + ww->intr = intr; + ww->contended = NULL; +} + +static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww) +{ + struct drm_i915_gem_object *obj; + + while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link))) { + list_del(&obj->obj_link); + i915_gem_object_unlock(obj); + } +} + +void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj) +{ + list_del(&obj->obj_link); + i915_gem_object_unlock(obj); +} + +void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww) +{ + i915_gem_ww_ctx_unlock_all(ww); + WARN_ON(ww->contended); + ww_acquire_fini(&ww->ctx); +} + +int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww) +{ + int ret = 0; + + if (WARN_ON(!ww->contended)) + return -EINVAL; + + i915_gem_ww_ctx_unlock_all(ww); + if (ww->intr) + ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx); + else + dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx); + + if (!ret) + list_add_tail(&ww->contended->obj_link, &ww->obj_list); + + ww->contended = NULL; + + return ret; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_gem_device.c" #include "selftests/i915_gem.c" diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index f333e88a2b6ed531cbc2300beb134a5ed14168ac..a4cad3f154caf189c4b5731e47b1a87ebd757182 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -116,4 +116,16 @@ static inline bool __tasklet_is_scheduled(struct tasklet_struct *t) return test_bit(TASKLET_STATE_SCHED, &t->state); } +struct i915_gem_ww_ctx { + struct ww_acquire_ctx ctx; + struct list_head obj_list; + bool intr; + struct drm_i915_gem_object *contended; +}; + +void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ctx, bool intr); +void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ctx); +int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ctx); +void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj); + #endif /* __I915_GEM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 421613219ae9cd4c6ecbe50783b259fc89b3310a..f96032c60a12df1e4d687653134401c6e88094f2 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -132,6 +132,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, case I915_PARAM_HAS_EXEC_BATCH_FIRST: case I915_PARAM_HAS_EXEC_FENCE_ARRAY: case I915_PARAM_HAS_EXEC_SUBMIT_FENCE: + case I915_PARAM_HAS_EXEC_TIMELINE_FENCES: /* For the time being all of these are always true; * if some supported hardware does not have one of these * features this value needs to be provided from diff --git 
a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 6a3a2ce0b394789e0f25ed9bf04b0af305b20951..3e6cbb0d1150ebe8999ca6b0609adacff0413d43 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1159,7 +1159,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee) switch (engine->id) { default: MISSING_CASE(engine->id); - /* fall through */ + fallthrough; case RCS0: mmio = RENDER_HWS_PGA_GEN7; break; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1fa67700d8f4991225682c166c4d82bfd8f434f9..f113fe44572b5e254582753ff79eb7584b6d5256 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -41,6 +41,7 @@ #include "display/intel_lpe_audio.h" #include "display/intel_psr.h" +#include "gt/intel_breadcrumbs.h" #include "gt/intel_gt.h" #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm_irq.h" diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 8d8db9ff0a484ccc62a07270d2ce172b4ac3c2cc..7f139ea4a90b205832791269a19b95569978dc41 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -102,6 +102,11 @@ i915_param_named(psr_safest_params, bool, 0400, "is helpful to detect if PSR issues are related to bad values set in " " VBT. (0=use VBT parameters, 1=use safest parameters)"); +i915_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400, + "Enable PSR2 selective fetch " + "(0=disabled, 1=enabled) " + "Default: 0"); + i915_param_named_unsafe(force_probe, charp, 0400, "Force probe the driver for specified devices. " "See CONFIG_DRM_I915_FORCE_PROBE for details."); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 53fb5ba8fbed6bcdef15e510677ac85ffc4cb996..330c03e2b4f7051a2dae20e0912a4c930c997f1f 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -54,6 +54,7 @@ struct drm_printer; param(int, enable_fbc, -1, 0600) \ param(int, enable_psr, -1, 0600) \ param(bool, psr_safest_params, false, 0600) \ + param(bool, enable_psr2_sel_fetch, false, 0600) \ param(int, disable_power_well, -1, 0400) \ param(int, enable_ips, 1, 0600) \ param(int, invert_brightness, 0, 0600) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 2338f92ce4900aa0cfcfee2c4643fe9b43fe6733..366ddfc8df6b65982a69445f83943506e5a86724 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -890,6 +890,7 @@ static const struct intel_device_info rkl_info = { .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .require_force_probe = 1, + .display.has_hti = 1, .display.has_psr_hw_tracking = 0, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0), diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index c6f6370283cf44bf6f82dcc57bf1d2ae34bc4029..e94976976571fe61181dd87f24cb3227eb7a544a 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1195,24 +1195,39 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream) struct i915_gem_engines_iter it; struct i915_gem_context *ctx = stream->ctx; struct intel_context *ce; - int err; + struct i915_gem_ww_ctx ww; + int err = -ENODEV; for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { if (ce->engine != stream->engine) /* first match! 
*/ continue; - /* - * As the ID is the gtt offset of the context's vma we - * pin the vma to ensure the ID remains fixed. - */ - err = intel_context_pin(ce); - if (err == 0) { - stream->pinned_ctx = ce; - break; - } + err = 0; + break; } i915_gem_context_unlock_engines(ctx); + if (err) + return ERR_PTR(err); + + i915_gem_ww_ctx_init(&ww, true); +retry: + /* + * As the ID is the gtt offset of the context's vma we + * pin the vma to ensure the ID remains fixed. + */ + err = intel_context_pin_ww(ce, &ww); + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + + if (err) + return ERR_PTR(err); + + stream->pinned_ctx = ce; return stream->pinned_ctx; } @@ -1923,15 +1938,22 @@ emit_oa_config(struct i915_perf_stream *stream, { struct i915_request *rq; struct i915_vma *vma; + struct i915_gem_ww_ctx ww; int err; vma = get_oa_vma(stream, oa_config); if (IS_ERR(vma)) return PTR_ERR(vma); - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + i915_gem_ww_ctx_init(&ww, true); +retry: + err = i915_gem_object_lock(vma->obj, &ww); + if (err) + goto err; + + err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) - goto err_vma_put; + goto err; intel_engine_pm_get(ce->engine); rq = i915_request_create(ce); @@ -1953,11 +1975,9 @@ emit_oa_config(struct i915_perf_stream *stream, goto err_add_request; } - i915_vma_lock(vma); err = i915_request_await_object(rq, vma->obj, 0); if (!err) err = i915_vma_move_to_active(vma, rq, 0); - i915_vma_unlock(vma); if (err) goto err_add_request; @@ -1971,7 +1991,14 @@ emit_oa_config(struct i915_perf_stream *stream, i915_request_add(rq); err_vma_unpin: i915_vma_unpin(vma); -err_vma_put: +err: + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + + i915_gem_ww_ctx_fini(&ww); i915_vma_put(vma); return err; } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 28bc5f13ae521eeadf44035ec69fb35ec7f92cf7..69c0fa20eba17b031b40073e63250715bbab02be 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -445,8 +445,6 @@ static void i915_pmu_event_destroy(struct perf_event *event) container_of(event->pmu, typeof(*i915), pmu.base); drm_WARN_ON(&i915->drm, event->parent); - - module_put(THIS_MODULE); } static int @@ -476,7 +474,7 @@ config_status(struct drm_i915_private *i915, u64 config) if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) /* Requires a mutex for sampling! */ return -ENODEV; - /* Fall-through. 
*/ + fallthrough; case I915_PMU_REQUESTED_FREQUENCY: if (INTEL_GEN(i915) < 6) return -ENODEV; @@ -538,10 +536,8 @@ static int i915_pmu_event_init(struct perf_event *event) if (ret) return ret; - if (!event->parent) { - __module_get(THIS_MODULE); + if (!event->parent) event->destroy = i915_pmu_event_destroy; - } return 0; } @@ -1130,6 +1126,7 @@ void i915_pmu_register(struct drm_i915_private *i915) if (!pmu->base.attr_groups) goto err_attr; + pmu->base.module = THIS_MODULE; pmu->base.task_ctx_nr = perf_invalid_context; pmu->base.event_init = i915_pmu_event_init; pmu->base.add = i915_pmu_event_add; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4e796ff4d7d0ff16193c94788afef66dc0fa20f4..ac691927e29d8c89668e5fcb1b8407b7e9f020ed 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1898,6 +1898,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define PWR_DOWN_LN_3_1_0 (0xb << 4) #define PWR_DOWN_LN_MASK (0xf << 4) #define PWR_DOWN_LN_SHIFT 4 +#define EDP4K2K_MODE_OVRD_EN (1 << 3) +#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2) #define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy)) #define ICL_LANE_ENABLE_AUX (1 << 0) @@ -2919,6 +2921,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MBUS_BBOX_CTL_S1 _MMIO(0x45040) #define MBUS_BBOX_CTL_S2 _MMIO(0x45044) +#define HDPORT_STATE _MMIO(0x45050) +#define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12) +#define HDPORT_PHY_USED_DP(phy) REG_BIT(2 * (phy) + 2) +#define HDPORT_PHY_USED_HDMI(phy) REG_BIT(2 * (phy) + 1) +#define HDPORT_ENABLED REG_BIT(0) + /* Make render/texture TLB fetches lower priorty than associated data * fetches. This is not turned on by default */ @@ -7870,6 +7878,7 @@ enum { # define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) #define CHICKEN_PAR1_1 _MMIO(0x42080) +#define DIS_RAM_BYPASS_PSR2_MAN_TRACK (1 << 16) #define SKL_DE_COMPRESSED_HASH_MODE (1 << 15) #define DPA_MASK_VBLANK_SRD (1 << 15) #define FORCE_ARB_IDLE_PLANES (1 << 14) @@ -8711,6 +8720,7 @@ enum { #define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31) #define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30) #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29) +#define PCH_DPMGUNIT_CLOCK_GATE_DISABLE (1 << 15) #define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14) #define CNP_PWM_CGE_GATING_DISABLE (1 << 13) #define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12) @@ -9217,8 +9227,8 @@ enum { #define DISPLAY_IPS_CONTROL 0x19 #define TGL_PCODE_TCCOLD 0x26 #define TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED REG_BIT(0) -#define TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ 0 -#define TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ REG_BIT(0) +#define TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ 0 +#define TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ REG_BIT(0) /* See also IPS_CTL */ #define IPS_PCODE_CONTROL (1 << 30) #define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A @@ -10277,12 +10287,18 @@ enum skl_power_gate { #define ICL_DPCLKA_CFGCR0 _MMIO(0x164280) #define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24)) +#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10) #define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < PORT_TC4 ? 
\ (tc_port) + 12 : \ (tc_port) - PORT_TC4 + 21)) #define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2) #define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) #define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) +#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) _PICK(phy, 0, 2, 4, 27) +#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) \ + (3 << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) +#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) \ + ((pll) << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) /* CNL PLL */ #define DPLL0_ENABLE 0x46010 @@ -10503,19 +10519,21 @@ enum skl_power_gate { #define _TGL_DPLL0_CFGCR0 0x164284 #define _TGL_DPLL1_CFGCR0 0x16428C -/* TODO: add DPLL4 */ #define _TGL_TBTPLL_CFGCR0 0x16429C #define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ _TGL_DPLL1_CFGCR0, \ _TGL_TBTPLL_CFGCR0) +#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \ + _TGL_DPLL1_CFGCR0) #define _TGL_DPLL0_CFGCR1 0x164288 #define _TGL_DPLL1_CFGCR1 0x164290 -/* TODO: add DPLL4 */ #define _TGL_TBTPLL_CFGCR1 0x1642A0 #define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ _TGL_DPLL1_CFGCR1, \ _TGL_TBTPLL_CFGCR1) +#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \ + _TGL_DPLL1_CFGCR1) #define _DKL_PHY1_BASE 0x168000 #define _DKL_PHY2_BASE 0x169000 @@ -12336,4 +12354,10 @@ enum skl_power_gate { #define DSB_ENABLE (1 << 31) #define DSB_STATUS (1 << 0) +#define TGL_ROOT_DEVICE_ID 0x9A00 +#define TGL_ROOT_DEVICE_MASK 0xFF00 +#define TGL_ROOT_DEVICE_SKU_MASK 0xF +#define TGL_ROOT_DEVICE_SKU_ULX 0x2 +#define TGL_ROOT_DEVICE_SKU_ULT 0x4 + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 0b2fe55e6194573409582e56b263d40c028cea6c..11e272422fb754c7b5fe8476ad830d19864dcd22 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -31,6 +31,7 @@ #include #include "gem/i915_gem_context.h" +#include "gt/intel_breadcrumbs.h" #include "gt/intel_context.h" #include "gt/intel_ring.h" #include "gt/intel_rps.h" @@ -186,48 +187,34 @@ static void irq_execute_cb_hook(struct irq_work *wrk) irq_execute_cb(wrk); } -static void __notify_execute_cb(struct i915_request *rq) +static __always_inline void +__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk)) { struct execute_cb *cb, *cn; - lockdep_assert_held(&rq->lock); - - GEM_BUG_ON(!i915_request_is_active(rq)); if (llist_empty(&rq->execute_cb)) return; - llist_for_each_entry_safe(cb, cn, rq->execute_cb.first, work.llnode) - irq_work_queue(&cb->work); - - /* - * XXX Rollback on __i915_request_unsubmit() - * - * In the future, perhaps when we have an active time-slicing scheduler, - * it will be interesting to unsubmit parallel execution and remove - * busywaits from the GPU until their master is restarted. This is - * quite hairy, we have to carefully rollback the fence and do a - * preempt-to-idle cycle on the target engine, all the while the - * master execute_cb may refire. 
- */ - init_llist_head(&rq->execute_cb); + llist_for_each_entry_safe(cb, cn, + llist_del_all(&rq->execute_cb), + work.llnode) + fn(&cb->work); } -static inline void -remove_from_client(struct i915_request *request) +static void __notify_execute_cb_irq(struct i915_request *rq) { - struct drm_i915_file_private *file_priv; + __notify_execute_cb(rq, irq_work_queue); +} - if (!READ_ONCE(request->file_priv)) - return; +static bool irq_work_imm(struct irq_work *wrk) +{ + wrk->func(wrk); + return false; +} - rcu_read_lock(); - file_priv = xchg(&request->file_priv, NULL); - if (file_priv) { - spin_lock(&file_priv->mm.lock); - list_del(&request->client_link); - spin_unlock(&file_priv->mm.lock); - } - rcu_read_unlock(); +static void __notify_execute_cb_imm(struct i915_request *rq) +{ + __notify_execute_cb(rq, irq_work_imm); } static void free_capture_list(struct i915_request *request) @@ -274,9 +261,16 @@ static void remove_from_engine(struct i915_request *rq) locked = engine; } list_del_init(&rq->sched.link); + clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags); + + /* Prevent further __await_execution() registering a cb, then flush */ + set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); + spin_unlock_irq(&locked->active.lock); + + __notify_execute_cb_imm(rq); } bool i915_request_retire(struct i915_request *rq) @@ -288,6 +282,7 @@ bool i915_request_retire(struct i915_request *rq) GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); trace_i915_request_retire(rq); + i915_request_mark_complete(rq); /* * We know the GPU must have read the request to have @@ -305,32 +300,30 @@ bool i915_request_retire(struct i915_request *rq) __i915_request_fill(rq, POISON_FREE); rq->ring->head = rq->postfix; + if (!i915_request_signaled(rq)) { + spin_lock_irq(&rq->lock); + dma_fence_signal_locked(&rq->fence); + spin_unlock_irq(&rq->lock); + } + + if (i915_request_has_waitboost(rq)) { + GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters)); + atomic_dec(&rq->engine->gt->rps.num_waiters); + } + /* * We only loosely track inflight requests across preemption, * and so we may find ourselves attempting to retire a _completed_ * request that we have removed from the HW and put back on a run * queue. + * + * As we set I915_FENCE_FLAG_ACTIVE on the request, this should be + * after removing the breadcrumb and signaling it, so that we do not + * inadvertently attach the breadcrumb to a completed request. 
*/ remove_from_engine(rq); - - spin_lock_irq(&rq->lock); - i915_request_mark_complete(rq); - if (!i915_request_signaled(rq)) - dma_fence_signal_locked(&rq->fence); - if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) - i915_request_cancel_breadcrumb(rq); - if (i915_request_has_waitboost(rq)) { - GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters)); - atomic_dec(&rq->engine->gt->rps.num_waiters); - } - if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) { - set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); - __notify_execute_cb(rq); - } GEM_BUG_ON(!llist_empty(&rq->execute_cb)); - spin_unlock_irq(&rq->lock); - remove_from_client(rq); __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */ intel_context_exit(rq->context); @@ -357,12 +350,6 @@ void i915_request_retire_upto(struct i915_request *rq) } while (i915_request_retire(tmp) && tmp != rq); } -static void __llist_add(struct llist_node *node, struct llist_head *head) -{ - node->next = head->first; - head->first = node; -} - static struct i915_request * const * __engine_active(struct intel_engine_cs *engine) { @@ -388,17 +375,38 @@ static bool __request_in_flight(const struct i915_request *signal) * As we know that there are always preemption points between * requests, we know that only the currently executing request * may be still active even though we have cleared the flag. - * However, we can't rely on our tracking of ELSP[0] to known + * However, we can't rely on our tracking of ELSP[0] to know * which request is currently active and so maybe stuck, as * the tracking maybe an event behind. Instead assume that * if the context is still inflight, then it is still active * even if the active flag has been cleared. + * + * To further complicate matters, if there is a pending promotion, the HW + * may either perform a context switch to the second inflight execlists, + * or it may switch to the pending set of execlists. In the case of the + * latter, it may send the ACK and we process the event copying the + * pending[] over top of inflight[], _overwriting_ our *active. Since + * this implies the HW is arbitrating and not stuck in *active, we do + * not worry about complete accuracy, but we do require no read/write + * tearing of the pointer [the read of the pointer must be valid, even + * as the array is being overwritten, for which we require the writes + * to avoid tearing.] + * + * Note that the read of *execlists->active may race with the promotion + * of execlists->pending[] to execlists->inflight[], overwriting + * the value at *execlists->active. This is fine. The promotion implies + * that we received an ACK from the HW, and so the context is not + * stuck -- if we do not see ourselves in *active, the inflight status + * is valid. If instead we see ourselves being copied into *active, + * we are inflight and may signal the callback.
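/*
 * Editor's aside, not part of the patch: a minimal sketch of the
 * no-tearing requirement described above; peek_port() is a
 * hypothetical helper. Another CPU may copy execlists->pending[]
 * over execlists->inflight[] while we walk the ports, so the read
 * must be a single pointer-sized load: READ_ONCE() guarantees we
 * observe either the old or the new request pointer, never a torn
 * mix of the two.
 */
static struct i915_request *peek_port(struct i915_request * const *port)
{
	return READ_ONCE(*port); /* may race with promotion of pending[] */
}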
*/ if (!intel_context_inflight(signal->context)) return false; rcu_read_lock(); - for (port = __engine_active(signal->engine); (rq = *port); port++) { + for (port = __engine_active(signal->engine); + (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */ + port++) { if (rq->context == signal->context) { inflight = i915_seqno_passed(rq->fence.seqno, signal->fence.seqno); @@ -439,18 +447,24 @@ __await_execution(struct i915_request *rq, cb->work.func = irq_execute_cb_hook; } - spin_lock_irq(&signal->lock); - if (i915_request_is_active(signal) || __request_in_flight(signal)) { - if (hook) { - hook(rq, &signal->fence); - i915_request_put(signal); - } - i915_sw_fence_complete(cb->fence); - kmem_cache_free(global.slab_execute_cbs, cb); - } else { - __llist_add(&cb->work.llnode, &signal->execute_cb); + /* + * Register the callback first, then see if the signaler is already + * active. This ensures that if we race with the + * __notify_execute_cb from i915_request_submit() and we are not + * included in that list, we get a second bite of the cherry and + * execute it ourselves. After this point, a future + * i915_request_submit() will notify us. + * + * In i915_request_retire() we set the ACTIVE bit on a completed + * request (then flush the execute_cb). So by registering the + * callback first, then checking the ACTIVE bit, we serialise with + * the completed/retired request. + */ + if (llist_add(&cb->work.llnode, &signal->execute_cb)) { + if (i915_request_is_active(signal) || + __request_in_flight(signal)) + __notify_execute_cb_imm(signal); } - spin_unlock_irq(&signal->lock); return 0; } @@ -566,18 +580,28 @@ bool __i915_request_submit(struct i915_request *request) clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags); } + /* + * XXX Rollback bonded-execution on __i915_request_unsubmit()? + * + * In the future, perhaps when we have an active time-slicing scheduler, + * it will be interesting to unsubmit parallel execution and remove + * busywaits from the GPU until their master is restarted. This is + * quite hairy, we have to carefully rollback the fence and do a + * preempt-to-idle cycle on the target engine, all the while the + * master execute_cb may refire. + */ + __notify_execute_cb_irq(request); + /* We may be recursing from the signal callback of another i915 fence */ if (!i915_request_signaled(request)) { spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); - __notify_execute_cb(request); if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) && !i915_request_enable_breadcrumb(request)) intel_engine_signal_breadcrumbs(engine); spin_unlock(&request->lock); - GEM_BUG_ON(!llist_empty(&request->execute_cb)); } return result; @@ -600,27 +624,27 @@ void __i915_request_unsubmit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; + /* + * Only unwind in reverse order, required so that the per-context list + * is kept in seqno/ring order. + */ RQ_TRACE(request, "\n"); GEM_BUG_ON(!irqs_disabled()); lockdep_assert_held(&engine->active.lock); /* - * Only unwind in reverse order, required so that the per-context list - * is kept in seqno/ring order. + * Before we remove this breadcrumb from the signal list, we have + * to ensure that a concurrent dma_fence_enable_signaling() does not + * attach itself. We first mark the request as no longer active and + * make sure that is visible to other cores, and then remove the + * breadcrumb if attached. 
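/*
 * Editor's sketch with hypothetical names, not from the patch: the
 * publish-then-check handshake used by __await_execution() and
 * __i915_request_submit() above, reduced to its essentials. The waiter
 * publishes its callback before testing the active flag, while the
 * submitter sets the flag before draining the list; whichever way the
 * two race, at least one side observes the other, and llist_del_all()
 * detaches the whole chain atomically so each callback runs once.
 * (The patch adds a refinement on top of this: the waiter only
 * rechecks when its node was the first onto the llist.)
 */
#include <linux/bitops.h>
#include <linux/llist.h>

struct example_cb {
	struct llist_node node;
	void (*func)(struct example_cb *cb);
};

static void example_flush(struct llist_head *cbs)
{
	struct example_cb *cb, *cn;

	llist_for_each_entry_safe(cb, cn, llist_del_all(cbs), node)
		cb->func(cb);
}

static void example_await(struct llist_head *cbs, unsigned long *active,
			  struct example_cb *cb)
{
	llist_add(&cb->node, cbs);	/* publish first... */
	if (test_bit(0, active))	/* ...then check for a lost race */
		example_flush(cbs);	/* submit already happened */
}

static void example_submit(struct llist_head *cbs, unsigned long *active)
{
	set_bit(0, active);		/* mark active first... */
	example_flush(cbs);		/* ...then notify the waiters */
}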
*/ - - /* We may be recursing from the signal callback of another i915 fence */ - spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); - + GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); + clear_bit_unlock(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) i915_request_cancel_breadcrumb(request); - GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); - clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); - - spin_unlock(&request->lock); - /* We've already spun, don't charge on resubmitting. */ if (request->sched.semaphores && i915_request_started(request)) request->sched.semaphores = 0; @@ -757,7 +781,6 @@ static void __i915_request_ctor(void *arg) dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0); - rq->file_priv = NULL; rq->capture_list = NULL; init_llist_head(&rq->execute_cb); @@ -847,7 +870,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) /* No zalloc, everything must be cleared after use */ rq->batch = NULL; - GEM_BUG_ON(rq->file_priv); GEM_BUG_ON(rq->capture_list); GEM_BUG_ON(!llist_empty(&rq->execute_cb)); @@ -1640,7 +1662,7 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu) return this_cpu != cpu; } -static bool __i915_spin_request(const struct i915_request * const rq, int state) +static bool __i915_spin_request(struct i915_request * const rq, int state) { unsigned long timeout_ns; unsigned int cpu; @@ -1673,7 +1695,7 @@ static bool __i915_spin_request(const struct i915_request * const rq, int state) timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns); timeout_ns += local_clock_ns(&cpu); do { - if (i915_request_completed(rq)) + if (dma_fence_is_signaled(&rq->fence)) return true; if (signal_pending_state(state, current)) @@ -1697,7 +1719,7 @@ static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb) { struct request_wait *wait = container_of(cb, typeof(*wait), cb); - wake_up_process(wait->tsk); + wake_up_process(fetch_and_zero(&wait->tsk)); } /** @@ -1766,10 +1788,8 @@ long i915_request_wait(struct i915_request *rq, * duration, which we currently lack. */ if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) && - __i915_spin_request(rq, state)) { - dma_fence_signal(&rq->fence); + __i915_spin_request(rq, state)) goto out; - } /* * This client is about to stall waiting for the GPU. In many cases @@ -1783,25 +1803,36 @@ long i915_request_wait(struct i915_request *rq, * but at a cost of spending more power processing the workload * (bad for battery). */ - if (flags & I915_WAIT_PRIORITY) { - if (!i915_request_started(rq) && - INTEL_GEN(rq->engine->i915) >= 6) - intel_rps_boost(rq); - } + if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq)) + intel_rps_boost(rq); wait.tsk = current; if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake)) goto out; + /* + * Flush the submission tasklet, but only if it may help this request. + * + * We sometimes experience some latency between the HW interrupts and + * tasklet execution (mostly due to ksoftirqd latency, but it can also + * be due to lazy CS events), so let's run the tasklet manually if there + * is a chance it may submit this request. If the request is not ready + * to run, as it is waiting for other fences to be signaled, flushing + * the tasklet is busy work without any advantage for this client. + * + * If the HW is being lazy, this is the last chance before we go to + * sleep to catch any pending events.
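/*
 * Editor's sketch, not from the patch: the wait.tsk protocol used by
 * request_wait_wake() above, with hypothetical names. The callback
 * consumes the task pointer via fetch_and_zero() (an i915 helper), so
 * after waking, the waiter can tell whether the callback has already
 * fired and only calls dma_fence_remove_callback() when it has not --
 * see the READ_ONCE(wait.tsk) check later in i915_request_wait().
 */
struct example_wait {
	struct dma_fence_cb cb;
	struct task_struct *tsk;
};

static void example_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct example_wait *wait = container_of(cb, typeof(*wait), cb);

	wake_up_process(fetch_and_zero(&wait->tsk));
}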
We will check periodically in + * the heartbeat to flush the submission tasklets as a last resort + * for unhappy HW. + */ + if (i915_request_is_ready(rq)) + intel_engine_flush_submission(rq->engine); + for (;;) { set_current_state(state); - if (i915_request_completed(rq)) { - dma_fence_signal(&rq->fence); + if (dma_fence_is_signaled(&rq->fence)) break; - } - - intel_engine_flush_submission(rq->engine); if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; @@ -1817,7 +1848,9 @@ long i915_request_wait(struct i915_request *rq, } __set_current_state(TASK_RUNNING); - dma_fence_remove_callback(&rq->fence, &wait.cb); + if (READ_ONCE(wait.tsk)) + dma_fence_remove_callback(&rq->fence, &wait.cb); + GEM_BUG_ON(!list_empty(&wait.cb.node)); out: mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 5907628207611e59194d523794fdb8e3c46a117e..16b721080195f0aa1fb2f4e52c0ea8957d077655 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -284,10 +284,6 @@ struct i915_request { /** timeline->request entry for this request */ struct list_head link; - struct drm_i915_file_private *file_priv; - /** file_priv list entry for this request */ - struct list_head client_link; - I915_SELFTEST_DECLARE(struct { struct list_head link; unsigned long delay; @@ -365,10 +361,6 @@ void i915_request_submit(struct i915_request *request); void __i915_request_unsubmit(struct i915_request *request); void i915_request_unsubmit(struct i915_request *request); -/* Note: part of the intel_breadcrumbs family */ -bool i915_request_enable_breadcrumb(struct i915_request *request); -void i915_request_cancel_breadcrumb(struct i915_request *request); - long i915_request_wait(struct i915_request *rq, unsigned int flags, long timeout) diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 295b9829e2da519f8ff2933305dc76c2d7898a2a..4cd2038cbe35990c0a2e1a4d790dded6ec8d69b3 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -164,9 +164,13 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence, do { list_for_each_entry_safe(pos, next, &x->head, entry) { - pos->func(pos, - TASK_NORMAL, fence->error, - &extra); + int wake_flags; + + wake_flags = fence->error; + if (pos->func == autoremove_wake_function) + wake_flags = 0; + + pos->func(pos, TASK_NORMAL, wake_flags, &extra); } if (list_empty(&extra)) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index bc64f773dcdb4dd2f188034cb9e804d730624bde..495d28f6d160163a40f20e3969a3ae6428936d62 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -291,6 +291,8 @@ i915_vma_instance(struct drm_i915_gem_object *obj, struct i915_vma_work { struct dma_fence_work base; + struct i915_address_space *vm; + struct i915_vm_pt_stash stash; struct i915_vma *vma; struct drm_i915_gem_object *pinned; struct i915_sw_dma_fence_cb cb; @@ -302,13 +304,10 @@ static int __vma_bind(struct dma_fence_work *work) { struct i915_vma_work *vw = container_of(work, typeof(*vw), base); struct i915_vma *vma = vw->vma; - int err; - - err = vma->ops->bind_vma(vma->vm, vma, vw->cache_level, vw->flags); - if (err) - atomic_or(I915_VMA_ERROR, &vma->flags); - return err; + vma->ops->bind_vma(vw->vm, &vw->stash, + vma, vw->cache_level, vw->flags); + return 0; } static void __vma_release(struct dma_fence_work *work) @@ -317,6 +316,9 @@ 
static void __vma_release(struct dma_fence_work *work) if (vw->pinned) __i915_gem_object_unpin_pages(vw->pinned); + + i915_vm_free_pt_stash(vw->vm, &vw->stash); + i915_vm_put(vw->vm); } static const struct dma_fence_work_ops bind_ops = { @@ -376,7 +378,6 @@ int i915_vma_bind(struct i915_vma *vma, { u32 bind_flags; u32 vma_flags; - int ret; GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(vma->size > vma->node.size); @@ -433,9 +434,7 @@ int i915_vma_bind(struct i915_vma *vma, work->pinned = vma->obj; } } else { - ret = vma->ops->bind_vma(vma->vm, vma, cache_level, bind_flags); - if (ret) - return ret; + vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags); } atomic_or(bind_flags, &vma->flags); @@ -853,13 +852,19 @@ static void vma_unbind_pages(struct i915_vma *vma) __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS); } -int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) +int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, + u64 size, u64 alignment, u64 flags) { struct i915_vma_work *work = NULL; intel_wakeref_t wakeref = 0; unsigned int bound; int err; +#ifdef CONFIG_PROVE_LOCKING + if (debug_locks && lockdep_is_held(&vma->vm->i915->drm.struct_mutex)) + WARN_ON(!ww); +#endif + BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND); BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND); @@ -873,16 +878,30 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) if (err) return err; + if (flags & PIN_GLOBAL) + wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); + if (flags & vma->vm->bind_async_flags) { work = i915_vma_work(); if (!work) { err = -ENOMEM; - goto err_pages; + goto err_rpm; } - } - if (flags & PIN_GLOBAL) - wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); + work->vm = i915_vm_get(vma->vm); + + /* Allocate enough page directories to cover the PTEs to be used */ + if (vma->vm->allocate_va_range) { + i915_vm_alloc_pt_stash(vma->vm, + &work->stash, + vma->size); + + err = i915_vm_pin_pt_stash(vma->vm, + &work->stash); + if (err) + goto err_fence; + } + } /* * Differentiate between user/kernel vma inside the aliasing-ppgtt.
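/*
 * Editor's sketch, not part of the patch: the retry dance that every
 * caller of the new ww API in this series follows (cf. oa_pin_context(),
 * emit_oa_config() and igt_gem_ww_ctx()). example_pin() is a
 * hypothetical caller. On -EDEADLK the backoff drops every lock taken
 * so far and sleeps on the contended object, after which the whole
 * sequence is retried from the top.
 */
static int example_pin(struct i915_vma *vma)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* interruptible waits */
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (!err)
		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);	/* drops the object locks */
	return err;	/* on success the vma remains pinned */
}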
@@ -971,9 +990,9 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) err_fence: if (work) dma_fence_work_commit_imm(&work->base); +err_rpm: if (wakeref) intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); -err_pages: vma_put_pages(vma); return err; } @@ -989,7 +1008,8 @@ static void flush_idle_contexts(struct intel_gt *gt) intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); } -int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags) +int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, + u32 align, unsigned int flags) { struct i915_address_space *vm = vma->vm; int err; @@ -997,7 +1017,7 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags) GEM_BUG_ON(!i915_vma_is_ggtt(vma)); do { - err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL); + err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL); if (err != -ENOSPC) { if (!err) { err = i915_vma_wait_for_bind(vma); @@ -1167,6 +1187,12 @@ void i915_vma_revoke_mmap(struct i915_vma *vma) list_del(&vma->obj->userfault_link); } +static int +__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma) +{ + return __i915_request_await_exclusive(rq, &vma->active); +} + int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) { int err; @@ -1174,8 +1200,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) GEM_BUG_ON(!i915_vma_is_pinned(vma)); /* Wait for the vma to be bound before we start! */ - err = i915_request_await_active(rq, &vma->active, - I915_ACTIVE_AWAIT_EXCL); + err = __i915_request_await_bind(rq, vma); if (err) return err; diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index d0d01f9095486aa67a19afe761e39d94df1004f0..5b3a3c6534540620eef52098ac22102ba5986605 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -237,8 +237,17 @@ static inline void i915_vma_unlock(struct i915_vma *vma) } int __must_check -i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags); -int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags); +i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, + u64 size, u64 alignment, u64 flags); + +static inline int __must_check +i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) +{ + return i915_vma_pin_ww(vma, NULL, size, alignment, flags); +} + +int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, + u32 align, unsigned int flags); static inline int i915_vma_pin_count(const struct i915_vma *vma) { diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 40c590db3c7616d91d50b9141f9919852cc075b3..e2aa5bc3a6e019b059bbc82f9dbf9c34dee1492e 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -346,6 +346,25 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915) mask = BIT(INTEL_SUBPLATFORM_PORTF); } + if (IS_TIGERLAKE(i915)) { + struct pci_dev *root, *pdev = i915->drm.pdev; + + root = list_first_entry(&pdev->bus->devices, typeof(*root), bus_list); + + drm_WARN_ON(&i915->drm, mask); + drm_WARN_ON(&i915->drm, (root->device & TGL_ROOT_DEVICE_MASK) != + TGL_ROOT_DEVICE_ID); + + switch (root->device & TGL_ROOT_DEVICE_SKU_MASK) { + case TGL_ROOT_DEVICE_SKU_ULX: + mask = BIT(INTEL_SUBPLATFORM_ULX); + break; + case TGL_ROOT_DEVICE_SKU_ULT: + mask = BIT(INTEL_SUBPLATFORM_ULT); + break; + } + } + GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS); 
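	/*
	 * Editor's note, worked example with a hypothetical id: a TGL
	 * root port of 0x9A04 passes the check above (0x9A04 &
	 * TGL_ROOT_DEVICE_MASK == TGL_ROOT_DEVICE_ID, i.e. 0x9A00), and
	 * its SKU field 0x9A04 & TGL_ROOT_DEVICE_SKU_MASK == 0x4 selects
	 * TGL_ROOT_DEVICE_SKU_ULT, so IS_TGL_U() will match.
	 */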
RUNTIME_INFO(i915)->platform_mask[pi] |= mask; diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index fd2385457ab665f94ca188fa59ccf36de72cee1a..6a3d607218aac3fd04b4786b37f35336df16b03f 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -146,6 +146,7 @@ enum intel_ppgtt_type { func(has_gmch); \ func(has_hdcp); \ func(has_hotplug); \ + func(has_hti); \ func(has_ipc); \ func(has_modular_fia); \ func(has_overlay); \ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index cfabbe0481ab894bdb8ff8e0839715cf8882e735..b4bd19266b8c5e1710fdabd88f0c91c7e9d0e118 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -100,12 +100,6 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) */ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | DISP_FBC_MEMORY_WAKE); - - if (IS_SKYLAKE(dev_priv)) { - /* WaDisableDopClockGating */ - I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) - & ~GEN7_DOP_CLOCK_GATE_ENABLE); - } } static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7223,12 +7217,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) gen9_init_clock_gating(dev_priv); /* WaDisableSDEUnitClockGating:kbl */ - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) + if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0)) I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); /* WaDisableGamClockGating:kbl */ - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) + if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0)) I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | GEN6_GAMUNIT_CLOCK_GATE_DISABLE); @@ -7251,6 +7245,10 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv) { gen9_init_clock_gating(dev_priv); + /* WaDisableDopClockGating:skl */ + I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) & + ~GEN7_DOP_CLOCK_GATE_ENABLE); + /* WAC6entrylatency:skl */ I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | FBC_LLC_FULLY_OPEN); diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c index 939a6caebb034fe1e8ca6e6bb8ebb9c59a558d9b..632b912b0bc9b870d0bb7796e18589003a73712c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_buddy.c +++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c @@ -8,8 +8,6 @@ #include "../i915_selftest.h" #include "i915_random.h" -#define SZ_8G (1ULL << 33) - static void __igt_dump_block(struct i915_buddy_mm *mm, struct i915_buddy_block *block, bool buddy) @@ -281,18 +279,22 @@ static int igt_check_mm(struct i915_buddy_mm *mm) static void igt_mm_config(u64 *size, u64 *chunk_size) { I915_RND_STATE(prng); - u64 s, ms; + u32 s, ms; /* Nothing fancy, just try to get an interesting bit pattern */ prandom_seed_state(&prng, i915_selftest.random_seed); - s = i915_prandom_u64_state(&prng) & (SZ_8G - 1); - ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12))); - s = max(s & -ms, ms); + /* Let size be a random number of pages up to 8 GB (2M pages) */ + s = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng); + /* Let the chunk size be a random power of 2 less than size */ + ms = BIT(i915_prandom_u32_max_state(ilog2(s), &prng)); + /* Round size down to the chunk size */ + s &= -ms; - *chunk_size = ms; - *size = s; + /* Convert from pages to bytes */ + *chunk_size = (u64)ms << 12; + *size = (u64)s << 12; } static int igt_buddy_alloc_smoke(void *arg) diff --git 
a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 88d400b9df88c35c4544c64d1ab6c1e0b02eb649..23a6132c5f4e8ad8c1f21fc8355e194c5814cebd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -199,11 +199,52 @@ static int igt_gem_hibernate(void *arg) return err; } +static int igt_gem_ww_ctx(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj, *obj2; + struct i915_gem_ww_ctx ww; + int err = 0; + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj2)) { + err = PTR_ERR(obj2); + goto put1; + } + + i915_gem_ww_ctx_init(&ww, true); +retry: + /* Lock the objects, twice for good measure (-EALREADY handling) */ + err = i915_gem_object_lock(obj, &ww); + if (!err) + err = i915_gem_object_lock_interruptible(obj, &ww); + if (!err) + err = i915_gem_object_lock_interruptible(obj2, &ww); + if (!err) + err = i915_gem_object_lock(obj2, &ww); + + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + i915_gem_object_put(obj2); +put1: + i915_gem_object_put(obj); + return err; +} + int i915_gem_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_gem_suspend), SUBTEST(igt_gem_hibernate), + SUBTEST(igt_gem_ww_ctx), }; if (intel_gt_is_wedged(&i915->gt)) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 0016ffc7d914732387ee81de8f3b8287a5a938a9..af8205a2bd8fd47ee54bacde29e64be5d2ea1351 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -172,35 +172,45 @@ static int igt_ppgtt_alloc(void *arg) /* Check we can allocate the entire range */ for (size = 4096; size <= limit; size <<= 2) { - err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size); + struct i915_vm_pt_stash stash = {}; + + err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size); + if (err) + goto err_ppgtt_cleanup; + + err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash); if (err) { - if (err == -ENOMEM) { - pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n", - size, ilog2(size)); - err = 0; /* virtual space too large! */ - } + i915_vm_free_pt_stash(&ppgtt->vm, &stash); goto err_ppgtt_cleanup; } + ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size); cond_resched(); ppgtt->vm.clear_range(&ppgtt->vm, 0, size); + + i915_vm_free_pt_stash(&ppgtt->vm, &stash); } /* Check we can incrementally allocate the entire range */ for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) { - err = ppgtt->vm.allocate_va_range(&ppgtt->vm, - last, size - last); + struct i915_vm_pt_stash stash = {}; + + err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last); + if (err) + goto err_ppgtt_cleanup; + + err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash); if (err) { - if (err == -ENOMEM) { - pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n", - last, size - last, ilog2(size)); - err = 0; /* virtual space too large!
*/ - } + i915_vm_free_pt_stash(&ppgtt->vm, &stash); goto err_ppgtt_cleanup; } + ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, + last, size - last); cond_resched(); + + i915_vm_free_pt_stash(&ppgtt->vm, &stash); } err_ppgtt_cleanup: @@ -284,9 +294,23 @@ static int lowlevel_hole(struct i915_address_space *vm, break; } - if (vm->allocate_va_range && - vm->allocate_va_range(vm, addr, BIT_ULL(size))) - break; + if (vm->allocate_va_range) { + struct i915_vm_pt_stash stash = {}; + + if (i915_vm_alloc_pt_stash(vm, &stash, + BIT_ULL(size))) + break; + + if (i915_vm_pin_pt_stash(vm, &stash)) { + i915_vm_free_pt_stash(vm, &stash); + break; + } + + vm->allocate_va_range(vm, &stash, + addr, BIT_ULL(size)); + + i915_vm_free_pt_stash(vm, &stash); + } mock_vma->pages = obj->mm.pages; mock_vma->node.size = BIT_ULL(size); @@ -1881,6 +1905,7 @@ static int igt_cs_tlb(void *arg) continue; while (!__igt_timeout(end_time, NULL)) { + struct i915_vm_pt_stash stash = {}; struct i915_request *rq; u64 offset; @@ -1888,10 +1913,6 @@ static int igt_cs_tlb(void *arg) 0, vm->total - PAGE_SIZE, chunk_size, PAGE_SIZE); - err = vm->allocate_va_range(vm, offset, chunk_size); - if (err) - goto end; - memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32)); vma = i915_vma_instance(bbe, vm, NULL); @@ -1904,6 +1925,20 @@ static int igt_cs_tlb(void *arg) if (err) goto end; + err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size); + if (err) + goto end; + + err = i915_vm_pin_pt_stash(vm, &stash); + if (err) { + i915_vm_free_pt_stash(vm, &stash); + goto end; + } + + vm->allocate_va_range(vm, &stash, offset, chunk_size); + + i915_vm_free_pt_stash(vm, &stash); + /* Prime the TLB with the dummy pages */ for (i = 0; i < count; i++) { vma->node.start = offset + i * PAGE_SIZE; diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c index c2d001d9c0ec03e920195b26d61620fde6df3ba4..debbac660519e18a8e6c5b638b859d155f785257 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf.c +++ b/drivers/gpu/drm/i915/selftests/i915_perf.c @@ -307,7 +307,7 @@ static int live_noa_gpr(void *arg) } /* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */ - scratch = kmap(ce->vm->scratch[0].base.page); + scratch = kmap(__px_page(ce->vm->scratch[0])); memset(scratch, POISON_FREE, PAGE_SIZE); rq = intel_context_create_request(ce); @@ -405,7 +405,7 @@ static int live_noa_gpr(void *arg) out_rq: i915_request_put(rq); out_ce: - kunmap(ce->vm->scratch[0].base.page); + kunmap(__px_page(ce->vm->scratch[0])); intel_context_put(ce); out: stream_destroy(stream); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 57dd6f5122eedc4f01c59cfdd1838ebebcf4834d..3092ca763789c052ff15af67dd713cc88ad88b37 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -331,7 +331,7 @@ static int __igt_breadcrumbs_smoketest(void *arg) if (!wait) { i915_sw_fence_commit(submit); heap_fence_put(submit); - err = ENOMEM; + err = -ENOMEM; break; } @@ -862,6 +862,8 @@ static int live_all_engines(void *arg) goto out_free; } + i915_vma_lock(batch); + idx = 0; for_each_uabi_engine(engine, i915) { request[idx] = intel_engine_create_kernel_request(engine); @@ -872,11 +874,9 @@ static int live_all_engines(void *arg) goto out_request; } - i915_vma_lock(batch); err = i915_request_await_object(request[idx], batch->obj, 0); if (err == 0) err = i915_vma_move_to_active(batch, request[idx], 0); - i915_vma_unlock(batch); 
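	/*
	 * Editor's note: the batch object lock is deliberately hoisted
	 * out of the for_each_uabi_engine() loop in this hunk; with the
	 * ww conversion the dma-resv lock is taken once before building
	 * the requests for every engine and released only after the
	 * last emit_bb_start(), instead of being bounced per iteration.
	 */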
GEM_BUG_ON(err); err = engine->emit_bb_start(request[idx], @@ -891,6 +891,8 @@ static int live_all_engines(void *arg) idx++; } + i915_vma_unlock(batch); + idx = 0; for_each_uabi_engine(engine, i915) { if (i915_request_completed(request[idx])) { @@ -981,12 +983,13 @@ static int live_sequential_engines(void *arg) goto out_free; } + i915_vma_lock(batch); request[idx] = intel_engine_create_kernel_request(engine); if (IS_ERR(request[idx])) { err = PTR_ERR(request[idx]); pr_err("%s: Request allocation failed for %s with err=%d\n", __func__, engine->name, err); - goto out_request; + goto out_unlock; } if (prev) { @@ -996,16 +999,14 @@ static int live_sequential_engines(void *arg) i915_request_add(request[idx]); pr_err("%s: Request await failed for %s with err=%d\n", __func__, engine->name, err); - goto out_request; + goto out_unlock; } } - i915_vma_lock(batch); err = i915_request_await_object(request[idx], batch->obj, false); if (err == 0) err = i915_vma_move_to_active(batch, request[idx], 0); - i915_vma_unlock(batch); GEM_BUG_ON(err); err = engine->emit_bb_start(request[idx], @@ -1020,6 +1021,11 @@ static int live_sequential_engines(void *arg) prev = request[idx]; idx++; + +out_unlock: + i915_vma_unlock(batch); + if (err) + goto out_request; } idx = 0; diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index af89c7fc8f59340bad9d38628b0c6f751caed521..88c5e9acb84c79e8040383b8e8c6939cd24cbf71 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -892,7 +892,7 @@ static int igt_vma_remapped_gtt(void *arg) unsigned int x, y; int err; - i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); err = i915_gem_object_set_to_gtt_domain(obj, true); i915_gem_object_unlock(obj); if (err) diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 6e80d99048e4bd363e04abf493336fd2cd9d11ec..93a38a3235843fb5ed580a64b17d275168615b7f 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -509,7 +509,7 @@ static int igt_lmem_write_cpu(void *arg) if (err) goto out_unpin; - i915_gem_object_lock(obj); + i915_gem_object_lock(obj, NULL); err = i915_gem_object_set_to_wc_domain(obj, true); i915_gem_object_unlock(obj); if (err) @@ -522,9 +522,9 @@ static int igt_lmem_write_cpu(void *arg) goto out_unpin; } - /* We want to throw in a random width/align */ - bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32), - sizeof(u32)); + /* A random multiple of u32, picked between [64, PAGE_SIZE - 64] */ + bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32)); + GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32))); i = 0; do { diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index b9810bf156c362a1b86fa013cb044491c61be559..f127e633f7ca816b40fccb64d6a7f3ac4aad44a8 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -78,6 +78,7 @@ static void mock_device_release(struct drm_device *dev) drm_mode_config_cleanup(&i915->drm); out: + i915_params_free(&i915->params); put_device(&i915->drm.pdev->dev); i915->drm.pdev = NULL; } @@ -165,6 +166,8 @@ struct drm_i915_private *mock_gem_device(void) i915->drm.pdev = pdev; drmm_add_final_kfree(&i915->drm, i915); + i915_params_copy(&i915->params, &i915_modparams); + intel_runtime_pm_init_early(&i915->runtime_pm); 
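	/*
	 * Editor's note: this i915_params_copy() pairs with the
	 * i915_params_free() added to mock_device_release() above; the
	 * copy duplicates any string (charp) module parameters for the
	 * mock device, so the matching free is needed to avoid leaking
	 * them when the selftest device is torn down.
	 */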
/* Using the global GTT may ask questions about KMS users, so prepare */ diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index b173086411ef52c52d830d5da76e2219aa3eb7c8..7270fc8ca801a27616d49f061ef99d8b2a8247d6 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -38,14 +38,14 @@ static void mock_insert_entries(struct i915_address_space *vm, { } -static int mock_bind_ppgtt(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) +static void mock_bind_ppgtt(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) { GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND); set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma)); - return 0; } static void mock_unbind_ppgtt(struct i915_address_space *vm, @@ -74,9 +74,12 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) ppgtt->vm.i915 = i915; ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); ppgtt->vm.file = ERR_PTR(-ENODEV); + ppgtt->vm.dma = &i915->drm.pdev->dev; i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); + ppgtt->vm.alloc_pt_dma = alloc_pt_dma; + ppgtt->vm.clear_range = mock_clear_range; ppgtt->vm.insert_page = mock_insert_page; ppgtt->vm.insert_entries = mock_insert_entries; @@ -90,13 +93,12 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) return ppgtt; } -static int mock_bind_ggtt(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) +static void mock_bind_ggtt(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) { - atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); - return 0; } static void mock_unbind_ggtt(struct i915_address_space *vm, @@ -116,6 +118,8 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) ggtt->mappable_end = resource_size(&ggtt->gmadr); ggtt->vm.total = 4096 * PAGE_SIZE; + ggtt->vm.alloc_pt_dma = alloc_pt_dma; + ggtt->vm.clear_range = mock_clear_range; ggtt->vm.insert_page = mock_insert_page; ggtt->vm.insert_entries = mock_insert_entries; diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 6776ebb3246d4d8c4f1ca1402a778e1e3d2c9183..8a4235d9d9f1e7bf622f286242c6d19798dfc211 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -447,7 +447,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, if (fb->pitches[1] != fb->pitches[2]) return -EINVAL; - /* fall-through */ + fallthrough; case DRM_FORMAT_NV12: case DRM_FORMAT_NV16: ubo = drm_plane_state_to_ubo(state); diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 5dab9c3d0a520283c42266c24c0f9a256b2406a1..a3d1617d7c67e0d45e564be8697cc3ffc647c339 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -686,7 +686,7 @@ static void ingenic_drm_unbind_all(void *d) component_unbind_all(priv->dev, &priv->drm); } -static int ingenic_drm_bind(struct device *dev) +static int ingenic_drm_bind(struct device *dev, bool has_components) { struct platform_device *pdev = to_platform_device(dev); const struct jz_soc_info *soc_info; @@ -821,7 +821,7 @@ static int ingenic_drm_bind(struct device *dev) return ret; } - if 
(IS_ENABLED(CONFIG_DRM_INGENIC_IPU)) { + if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && has_components) { ret = component_bind_all(dev, drm); if (ret) { if (ret != -EPROBE_DEFER) @@ -952,6 +952,11 @@ static int ingenic_drm_bind(struct device *dev) return ret; } +static int ingenic_drm_bind_with_components(struct device *dev) +{ + return ingenic_drm_bind(dev, true); +} + static int compare_of(struct device *dev, void *data) { return dev->of_node == data; @@ -970,7 +975,7 @@ static void ingenic_drm_unbind(struct device *dev) } static const struct component_master_ops ingenic_master_ops = { - .bind = ingenic_drm_bind, + .bind = ingenic_drm_bind_with_components, .unbind = ingenic_drm_unbind, }; @@ -981,16 +986,15 @@ static int ingenic_drm_probe(struct platform_device *pdev) struct device_node *np; if (!IS_ENABLED(CONFIG_DRM_INGENIC_IPU)) - return ingenic_drm_bind(dev); + return ingenic_drm_bind(dev, false); /* IPU is at port address 8 */ np = of_graph_get_remote_node(dev->of_node, 8, 0); - if (!np) { - dev_err(dev, "Unable to get IPU node\n"); - return -EINVAL; - } + if (!np) + return ingenic_drm_bind(dev, false); drm_of_component_match_add(dev, &match, compare_of, np); + of_node_put(np); return component_master_add_with_match(dev, &ingenic_master_ops, match); } diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c index f12e0271f16655fae972a331c7e7fdd9eb6c59e8..ffc6b584dbf85d82b778b37ebc3b0a069063ea08 100644 --- a/drivers/gpu/drm/meson/meson_osd_afbcd.c +++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c @@ -205,7 +205,7 @@ static int meson_g12a_afbcd_pixel_fmt(u64 modifier, uint32_t format) /* YTR is forbidden for non XBGR formats */ if (modifier & AFBC_FORMAT_MOD_YTR) return -EINVAL; - /* fall through */ + fallthrough; case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ABGR8888: return MAFBC_FMT_RGBA8888; diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c index a8bcc70644dff5878d132c70137b74c78c3d1348..1ffbbecafa22ba9532f7ab5a77f11084ae8b73ad 100644 --- a/drivers/gpu/drm/meson/meson_overlay.c +++ b/drivers/gpu/drm/meson/meson_overlay.c @@ -654,7 +654,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane, priv->viu.vd1_addr2, priv->viu.vd1_stride2, priv->viu.vd1_height2); - /* fallthrough */ + fallthrough; case 2: gem = drm_fb_cma_get_gem_obj(fb, 1); priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1]; @@ -666,7 +666,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane, priv->viu.vd1_addr1, priv->viu.vd1_stride1, priv->viu.vd1_height1); - /* fallthrough */ + fallthrough; case 1: gem = drm_fb_cma_get_gem_obj(fb, 0); priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0]; diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 6021f8d9efd1fbb0230ee921da3a9cdac3095ef1..48fa49f69d6d0a2180afecdcd7327de5344b398e 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; + gpu_write(gpu, REG_AXXX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); + /* NOTE: PM4/micro-engine firmware registers look to be the same * for a2xx and a3xx.. we could possibly push that part down to * adreno_gpu base class. 
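/*
 * Editor's sketch, not part of the patch: the ingenic hunk above threads a
 * new has_components flag into the bind path even though
 * component_master_ops::bind has a fixed signature, so a thin wrapper
 * supplies the extra argument. A minimal form of that pattern follows;
 * example_bind(), example_bind_with_components() and example_master_ops
 * are hypothetical names:
 */
#include <linux/component.h>
#include <linux/device.h>

static int example_bind(struct device *dev, bool has_components)
{
	/* ...common setup shared by both probe paths... */

	/* Bind sub-devices only when a component match was registered. */
	if (has_components)
		return component_bind_all(dev, NULL);

	return 0;
}

/* Trampoline with the exact signature the component framework expects. */
static int example_bind_with_components(struct device *dev)
{
	return example_bind(dev, true);
}

static const struct component_master_ops example_master_ops = {
	.bind = example_bind_with_components,
	/* .unbind unchanged */
};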
Or push both PM4 and PFP but diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 0a5ea9f56cb886aba890fffe00c414942cf15a4b..f6471145a7a607db4984242fdee8aa1d2e5332fc 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -211,6 +211,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; + /* + * Use the default ringbuffer size and block size but disable the RPTR + * shadow + */ + gpu_write(gpu, REG_AXXX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + /* Set the ringbuffer address */ + gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); + /* setup access protection: */ gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007); diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index b9b26b2bf9c548d38b81347fb745d811ccf20db4..9547536006254721ed1e0f9963830c0f3867a276 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -267,6 +267,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; + /* + * Use the default ringbuffer size and block size but disable the RPTR + * shadow + */ + gpu_write(gpu, REG_A4XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + /* Set the ringbuffer address */ + gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); + /* Load PM4: */ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data); len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 9e63a190642cc490d25e8b55e7fbb738d39311ab..91726da82ed67a28634c2044975b0852fc5212b3 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -59,7 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; - /* fall-thru */ + fallthrough; case MSM_SUBMIT_CMD_BUF: /* copy commands into RB: */ obj = submit->bos[submit->cmd[i].idx].obj; @@ -150,7 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; - /* fall-thru */ + fallthrough; case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); @@ -703,8 +703,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; - a5xx_preempt_hw_init(gpu); - if (!adreno_is_a510(adreno_gpu)) a5xx_gpmu_ucode_init(gpu); @@ -712,6 +710,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; + /* Set the ringbuffer address */ + gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI, + gpu->rb[0]->iova); + + gpu_write(gpu, REG_A5XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + a5xx_preempt_hw_init(gpu); + /* Disable the interrupts through the initial bringup stage */ gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK); @@ -1511,7 +1518,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) check_speed_bin(&pdev->dev); - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4); + /* Restricting nr_rings to 1 to temporarily disable preemption */ + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) { a5xx_destroy(&(a5xx_gpu->base.base)); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 
54868d4e3958f318dc67dd48506c04a41969a49e..1e5b1a15a70f02e80c4ea3c981ff555c877d2b2b 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -31,6 +31,7 @@ struct a5xx_gpu { struct msm_ringbuffer *next_ring; struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS]; + struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS]; struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS]; uint64_t preempt_iova[MSM_GPU_MAX_RINGS]; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index 9cf9353a7ff11334a04f1ca229ebc7d2a7976328..9f3fe177b00e935ec908fe80ea28f95cf1d27753 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -226,19 +226,31 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, struct adreno_gpu *adreno_gpu = &a5xx_gpu->base; struct msm_gpu *gpu = &adreno_gpu->base; struct a5xx_preempt_record *ptr; - struct drm_gem_object *bo = NULL; - u64 iova = 0; + void *counters; + struct drm_gem_object *bo = NULL, *counters_bo = NULL; + u64 iova = 0, counters_iova = 0; ptr = msm_gem_kernel_new(gpu->dev, A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE, - MSM_BO_UNCACHED, gpu->aspace, &bo, &iova); + MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova); if (IS_ERR(ptr)) return PTR_ERR(ptr); + /* The buffer to store counters needs to be unprivileged */ + counters = msm_gem_kernel_new(gpu->dev, + A5XX_PREEMPT_COUNTER_SIZE, + MSM_BO_UNCACHED, gpu->aspace, &counters_bo, &counters_iova); + if (IS_ERR(counters)) { + msm_gem_kernel_put(bo, gpu->aspace, true); + return PTR_ERR(counters); + } + msm_gem_object_set_name(bo, "preempt"); + msm_gem_object_set_name(counters_bo, "preempt_counters"); a5xx_gpu->preempt_bo[ring->id] = bo; + a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo; a5xx_gpu->preempt_iova[ring->id] = iova; a5xx_gpu->preempt[ring->id] = ptr; @@ -249,7 +261,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, ptr->data = 0; ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT; ptr->rptr_addr = rbmemptr(ring, rptr); - ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE; + ptr->counter = counters_iova; return 0; } @@ -260,8 +272,11 @@ void a5xx_preempt_fini(struct msm_gpu *gpu) struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); int i; - for (i = 0; i < gpu->nr_rings; i++) + for (i = 0; i < gpu->nr_rings; i++) { msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true); + msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], + gpu->aspace, true); + } } void a5xx_preempt_init(struct msm_gpu *gpu) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index b67b38c8fadf73a6834acf77e2a0ae1baf8dc74d..e1c7bcd1b1eb7bcdaec33785a5eb0b33f57e4142 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -133,7 +133,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) if (!gmu->legacy) { a6xx_hfi_set_freq(gmu, perf_index); - icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); + dev_pm_opp_set_bw(&gpu->pdev->dev, opp); pm_runtime_put(gmu->dev); return; } @@ -157,11 +157,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) if (ret) dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); - /* - * Eventually we will want to scale the path vote with the frequency but - * for now leave it at max so that the performance is nominal. 
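/*
 * Editor's sketch, not part of the patch: the surrounding a6xx GMU hunks
 * replace the fixed icc_set_bw() votes with dev_pm_opp_set_bw(), which
 * applies the bandwidth attached to the OPP entry for the chosen frequency
 * and drops the vote again when called with a NULL opp. A minimal usage
 * sketch, assuming the OPP table carries bandwidth values (e.g. via the
 * opp-peak-kBps binding); example_scale_bw() is a hypothetical helper:
 */
#include <linux/pm_opp.h>

static int example_scale_bw(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	int ret;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_bw(dev, opp);	/* vote per the OPP entry */
	dev_pm_opp_put(opp);

	return ret;
}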
- */ - icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); + dev_pm_opp_set_bw(&gpu->pdev->dev, opp); pm_runtime_put(gmu->dev); } @@ -204,6 +200,16 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) { int ret; u32 val; + u32 mask, reset_val; + + val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); + if (val <= 0x20010004) { + mask = 0xffffffff; + reset_val = 0xbabeface; + } else { + mask = 0x1ff; + reset_val = 0x100; + } gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); @@ -215,7 +221,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, - val == 0xbabeface, 100, 10000); + (val & mask) == reset_val, 100, 10000); if (ret) DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); @@ -602,7 +608,7 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE | A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE); - /* Fall through */ + fallthrough; case GMU_IDLE_STATE_SPTP: gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST, GMU_PWR_COL_HYST); @@ -845,10 +851,24 @@ static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) if (IS_ERR_OR_NULL(gpu_opp)) return; + gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ a6xx_gmu_set_freq(gpu, gpu_opp); dev_pm_opp_put(gpu_opp); } +static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) +{ + struct dev_pm_opp *gpu_opp; + unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; + + gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); + if (IS_ERR_OR_NULL(gpu_opp)) + return; + + dev_pm_opp_set_bw(&gpu->pdev->dev, gpu_opp); + dev_pm_opp_put(gpu_opp); +} + int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) { struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; @@ -882,7 +902,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) } /* Set the bus quota to a reasonable value for boot */ - icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072)); + a6xx_gmu_set_initial_bw(gpu, gmu); /* Enable the GMU interrupt */ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); @@ -1051,7 +1071,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) a6xx_gmu_shutdown(gmu); /* Remove the bus vote */ - icc_set_bw(gpu->icc_path, 0, 0); + dev_pm_opp_set_bw(&gpu->pdev->dev, NULL); /* * Make sure the GX domain is off before turning off the GMU (CX) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index c5a3e4d4c0078527225e6263b6ebbe06f0e162b4..66a95e22b7b3d523b5dca63bfc12595707b98c4d 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -117,7 +117,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; - /* fall-thru */ + fallthrough; case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); @@ -678,7 +678,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu) A6XX_PROTECT_RDONLY(0x980, 0x4)); gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0)); - if (adreno_is_a650(adreno_gpu)) { + /* Enable expanded apriv for targets that support it */ + if (gpu->hw_apriv) { gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL, (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1)); } @@ -694,6 +695,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu) if (ret) goto out; + /* Set 
the ringbuffer address */ + gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI, + gpu->rb[0]->iova); + + gpu_write(gpu, REG_A6XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + /* Always come up on rb 0 */ a6xx_gpu->cur_ring = gpu->rb[0]; @@ -1056,6 +1064,9 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) adreno_gpu->registers = NULL; adreno_gpu->reg_offsets = a6xx_register_offsets; + if (adreno_is_a650(adreno_gpu)) + adreno_gpu->base.hw_apriv = true; + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) { a6xx_destroy(&(a6xx_gpu->base.base)); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 959656ad69871b95e8438cdb5dd2d6c83d364b4e..b12f5b4a1bea9109545b2c700b26df72577c87eb 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -938,7 +938,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) msm_gem_kernel_put(dumper.bo, gpu->aspace, true); } - a6xx_get_debugbus(gpu, a6xx_state); + if (snapshot_debugbus) + a6xx_get_debugbus(gpu, a6xx_state); return &a6xx_state->base; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h index 846fd5b54c2302b55210b47c9a7323600ad6463d..2fb58b7098e4befe3623cb9381c231da6e9c4ee8 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h @@ -372,7 +372,7 @@ static const struct a6xx_indexed_registers { u32 data; u32 count; } a6xx_indexed_reglist[] = { - { "CP_SEQ_STAT", REG_A6XX_CP_SQE_STAT_ADDR, + { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR, REG_A6XX_CP_SQE_STAT_DATA, 0x33 }, { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR, REG_A6XX_CP_DRAW_STATE_DATA, 0x100 }, diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 4e84f3c76f4f5e2de84d4d076da724e5c65fa31a..9eeb46bf2a5dccb3784ad14a7832713402e9e953 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -14,6 +14,10 @@ bool hang_debug = false; MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); module_param_named(hang_debug, hang_debug, bool, 0600); +bool snapshot_debugbus = false; +MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)"); +module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600); + static const struct adreno_info gpulist[] = { { .rev = ADRENO_REV(2, 0, 0, 0), diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index e23641a5ec841c9ad997052c207b3739428b4177..862dd35b27d3dcc7451c883465ff9445bf7c36ce 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -396,30 +396,10 @@ int adreno_hw_init(struct msm_gpu *gpu) ring->next = ring->start; /* reset completed fence seqno: */ - ring->memptrs->fence = ring->seqno; + ring->memptrs->fence = ring->fctx->completed_fence; ring->memptrs->rptr = 0; } - /* - * Setup REG_CP_RB_CNTL. The same value is used across targets (with - * the excpetion of A430 that disables the RPTR shadow) - the cacluation - * for the ringbuffer size and block size is moved to msm_gpu.h for the - * pre-processor to deal with and the A430 variant is ORed in here - */ - adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL, - MSM_GPU_RB_CNTL_DEFAULT | - (adreno_is_a430(adreno_gpu) ? 
AXXX_CP_RB_CNTL_NO_UPDATE : 0)); - - /* Setup ringbuffer address - use ringbuffer[0] for GPU init */ - adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE, - REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova); - - if (!adreno_is_a430(adreno_gpu)) { - adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR, - REG_ADRENO_CP_RB_RPTR_ADDR_HI, - rbmemptr(gpu->rb[0], rptr)); - } - return 0; } @@ -427,11 +407,8 @@ int adreno_hw_init(struct msm_gpu *gpu) static uint32_t get_rptr(struct adreno_gpu *adreno_gpu, struct msm_ringbuffer *ring) { - if (adreno_is_a430(adreno_gpu)) - return ring->memptrs->rptr = adreno_gpu_read( - adreno_gpu, REG_ADRENO_CP_RB_RPTR); - else - return ring->memptrs->rptr; + return ring->memptrs->rptr = adreno_gpu_read( + adreno_gpu, REG_ADRENO_CP_RB_RPTR); } struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) @@ -474,7 +451,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, /* ignore if there has not been a ctx switch: */ if (priv->lastctx == ctx) break; - /* fall-thru */ + fallthrough; case MSM_SUBMIT_CMD_BUF: OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ? CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 99bb468f5f245ef4f0b2473d56628e2daed4b4bc..e55abae365b5a3e0b7cc1c7cbab7da0b5ae5dc2b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -21,6 +21,8 @@ #define REG_SKIP ~0 #define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP +extern bool snapshot_debugbus; + /** * adreno_regs: List of registers that are used in across all * 3D devices. Each device type has different offset value for the same diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index f272a8d0f95b20f26bfb39781b0f65b7b092ef36..c2729f71e2fa7386c38170fab8edf81a0bef5606 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -827,7 +827,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc, { struct dpu_crtc *dpu_crtc; struct drm_encoder *encoder; - bool request_bandwidth; + bool request_bandwidth = false; if (!crtc) { DPU_ERROR("invalid crtc\n"); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index a97f6d2e5a089c925ce7abb6362c77effb02051d..bd6def436c657961fe880ddae0b5602a8c3de373 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -599,7 +599,10 @@ static int dpu_encoder_virt_atomic_check( dpu_kms = to_dpu_kms(priv->kms); mode = &crtc_state->mode; adj_mode = &crtc_state->adjusted_mode; - global_state = dpu_kms_get_existing_global_state(dpu_kms); + global_state = dpu_kms_get_global_state(crtc_state->state); + if (IS_ERR(global_state)) + return PTR_ERR(global_state); + trace_dpu_enc_atomic_check(DRMID(drm_enc)); /* perform atomic check on the first physical encoder (master) */ @@ -625,12 +628,15 @@ static int dpu_encoder_virt_atomic_check( /* Reserve dynamic resources now. */ if (!ret) { /* - * Avoid reserving resources when mode set is pending. Topology - * info may not be available to complete reservation. + * Release and allocate resources on every modeset. + * Don't allocate when active is false.
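/*
 * Editor's sketch, not part of the patch: dpu_encoder_virt_atomic_check()
 * above now fetches the global state through the atomic transaction
 * (dpu_kms_get_global_state()) rather than peeking at the committed copy,
 * so a failed check can be rolled back cleanly. The underlying mechanism is
 * DRM's private-object state duplication; a generic accessor looks roughly
 * like this, with example_global_state and example_get_global_state() as
 * hypothetical names:
 */
#include <drm/drm_atomic.h>
#include <linux/err.h>

struct example_global_state {
	struct drm_private_state base;
	/* driver-global resource bookkeeping goes here */
};

static struct example_global_state *
example_get_global_state(struct drm_atomic_state *state,
			 struct drm_private_obj *obj)
{
	struct drm_private_state *priv;

	/* Duplicates the object's state into this transaction (once). */
	priv = drm_atomic_get_private_obj_state(state, obj);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	return container_of(priv, struct example_global_state, base);
}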
*/ if (drm_atomic_crtc_needs_modeset(crtc_state)) { - ret = dpu_rm_reserve(&dpu_kms->rm, global_state, - drm_enc, crtc_state, topology); + dpu_rm_release(global_state, drm_enc); + + if (!crtc_state->active_changed || crtc_state->active) + ret = dpu_rm_reserve(&dpu_kms->rm, global_state, + drm_enc, crtc_state, topology); } } @@ -1181,7 +1187,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) struct dpu_encoder_virt *dpu_enc = NULL; struct msm_drm_private *priv; struct dpu_kms *dpu_kms; - struct dpu_global_state *global_state; int i = 0; if (!drm_enc) { @@ -1200,7 +1205,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) priv = drm_enc->dev->dev_private; dpu_kms = to_dpu_kms(priv->kms); - global_state = dpu_kms_get_existing_global_state(dpu_kms); trace_dpu_enc_disable(DRMID(drm_enc)); @@ -1230,8 +1234,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); - dpu_rm_release(global_state, drm_enc); - mutex_unlock(&dpu_enc->enc_lock); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 33f6c56f01edac0172b6964b4f778e1bf872b762..29e373d2e7b5dcb08c36ae41e0d615108ccf6ea4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -866,9 +866,9 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); - min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale); + min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale); ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale, - pdpu->pipe_sblk->maxupscale << 16, + pdpu->pipe_sblk->maxdwnscale << 16, true, true); if (ret) { DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret); diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 5ccfad794c6a5b9551158f669ecbc01393a0a1bd..561bfa48841c33ced37e2b0a032f146acd4f6641 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -27,6 +27,34 @@ int msm_atomic_prepare_fb(struct drm_plane *plane, return msm_framebuffer_prepare(new_state->fb, kms->aspace); } +/* + * Helpers to control vblanks while we flush.. 
basically just to ensure + * that vblank accounting is switched on, so we get valid seqn/timestamp + * on pageflip events (if requested) + */ + +static void vblank_get(struct msm_kms *kms, unsigned crtc_mask) +{ + struct drm_crtc *crtc; + + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { + if (!crtc->state->active) + continue; + drm_crtc_vblank_get(crtc); + } +} + +static void vblank_put(struct msm_kms *kms, unsigned crtc_mask) +{ + struct drm_crtc *crtc; + + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { + if (!crtc->state->active) + continue; + drm_crtc_vblank_put(crtc); + } +} + static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) { unsigned crtc_mask = BIT(crtc_idx); @@ -44,6 +72,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) kms->funcs->enable_commit(kms); + vblank_get(kms, crtc_mask); + /* * Flush hardware updates: */ @@ -58,6 +88,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) kms->funcs->wait_flush(kms, crtc_mask); trace_msm_atomic_wait_flush_finish(crtc_mask); + vblank_put(kms, crtc_mask); + mutex_lock(&kms->commit_lock); kms->funcs->complete_commit(kms, crtc_mask); mutex_unlock(&kms->commit_lock); @@ -221,6 +253,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) */ kms->pending_crtc_mask &= ~crtc_mask; + vblank_get(kms, crtc_mask); + /* * Flush hardware updates: */ @@ -235,6 +269,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) kms->funcs->wait_flush(kms, crtc_mask); trace_msm_atomic_wait_flush_finish(crtc_mask); + vblank_put(kms, crtc_mask); + mutex_lock(&kms->commit_lock); kms->funcs->complete_commit(kms, crtc_mask); mutex_unlock(&kms->commit_lock); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 7d641c7e3514a7b653a29cece75896c800ca6417..79333842f70a1ff574d8e47137f7bf1af91cbcd3 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1320,6 +1320,13 @@ static int msm_pdev_remove(struct platform_device *pdev) return 0; } +static void msm_pdev_shutdown(struct platform_device *pdev) +{ + struct drm_device *drm = platform_get_drvdata(pdev); + + drm_atomic_helper_shutdown(drm); +} + static const struct of_device_id dt_match[] = { { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 }, { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }, @@ -1332,6 +1339,7 @@ MODULE_DEVICE_TABLE(of, dt_match); static struct platform_driver msm_platform_driver = { .probe = msm_pdev_probe, .remove = msm_pdev_remove, + .shutdown = msm_pdev_shutdown, .driver = { .name = "msm", .of_match_table = dt_match, diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index d5645472b25da4a1f445ea2316f5c35ac0f01201..57ddc9438351e994eed19b4ad9d3f8cb9a6dd825 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -908,7 +908,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, memptrs = msm_gem_kernel_new(drm, sizeof(struct msm_rbmemptrs) * nr_rings, - MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo, + check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo, &memptrs_iova); if (IS_ERR(memptrs)) { diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 0db117a7339b6959a979a31ccbd4f2b3b0adcc9f..37cffac4cbe3428fbd91c2700d8ea957c328db81 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -15,6 +15,7 @@ #include "msm_drv.h" #include "msm_fence.h" #include "msm_ringbuffer.h" +#include "msm_gem.h" struct msm_gem_submit; struct 
msm_gpu_perfcntr; @@ -139,6 +140,8 @@ struct msm_gpu { } devfreq; struct msm_gpu_state *crashstate; + /* True if the hardware supports expanded apriv (a650 and newer) */ + bool hw_apriv; }; /* It turns out that all targets use the same ringbuffer size */ @@ -327,4 +330,12 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu) mutex_unlock(&gpu->dev->struct_mutex); } +/* + * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can + * support expanded privileges + */ +#define check_apriv(gpu, flags) \ + (((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags)) + + #endif /* __MSM_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index e397c44cc01120e6e1829942696a02ddb3a8e28a..935bf9b1d94188cba556a0e5dfca389ba008f126 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -27,7 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, ring->id = id; ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, - MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova); + check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY), + gpu->aspace, &ring->bo, &ring->iova); if (IS_ERR(ring->start)) { ret = PTR_ERR(ring->start); diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c index ffdd447d870685af241fd754ce2803731e62ca9a..22d10f328559740c1d65914bfd15b1933d96a38c 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dac.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c @@ -419,7 +419,7 @@ static void nv04_dac_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", - nouveau_encoder_connector_get(nv_encoder)->base.name, + nv04_encoder_get_connector(nv_encoder)->base.name, nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index f9f4482c79b5488532ba11aed3cca877a3283421..42687ea2a4ca32fb3a30b37d5bd115c7218f12de 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c @@ -184,7 +184,8 @@ static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); + struct nouveau_connector *nv_connector = + nv04_encoder_get_connector(nv_encoder); if (!nv_connector->native_mode || nv_connector->scaling_mode == DRM_MODE_SCALE_NONE || @@ -478,7 +479,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", - nouveau_encoder_connector_get(nv_encoder)->base.name, + nv04_encoder_get_connector(nv_encoder)->base.name, nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } @@ -591,7 +592,7 @@ static void nv04_dfp_restore(struct drm_encoder *encoder) if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) { struct nouveau_connector *connector = - nouveau_encoder_connector_get(nv_encoder); + nv04_encoder_get_connector(nv_encoder); if (connector && connector->native_mode) call_lvds_script(dev, nv_encoder->dcb, head, diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 41d990cca6850e36e8ff4ac86e144d50263751e6..7739f46470d3e1215402e541f55194d6764d34a9 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ 
b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -35,9 +35,28 @@ #include +struct nouveau_connector * +nv04_encoder_get_connector(struct nouveau_encoder *encoder) +{ + struct drm_device *dev = to_drm_encoder(encoder)->dev; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct nouveau_connector *nv_connector = NULL; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->encoder == to_drm_encoder(encoder)) + nv_connector = nouveau_connector(connector); + } + drm_connector_list_iter_end(&conn_iter); + + return nv_connector; +} + static void -nv04_display_fini(struct drm_device *dev, bool suspend) +nv04_display_fini(struct drm_device *dev, bool runtime, bool suspend) { + struct nouveau_drm *drm = nouveau_drm(dev); struct nv04_display *disp = nv04_display(dev); struct drm_crtc *crtc; @@ -49,6 +68,9 @@ nv04_display_fini(struct drm_device *dev, bool suspend) if (nv_two_heads(dev)) NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0); + if (!runtime) + cancel_work_sync(&drm->hpd_work); + if (!suspend) return; diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index 495d3284e876695642727fb73ce908842315f144..5ace5e906949a02c5b4fc9653182a19f87212b0f 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h @@ -6,6 +6,8 @@ #include "nouveau_display.h" +struct nouveau_encoder; + enum nv04_fp_display_regs { FP_DISPLAY_END, FP_TOTAL, @@ -93,6 +95,8 @@ nv04_display(struct drm_device *dev) /* nv04_display.c */ int nv04_display_create(struct drm_device *); +struct nouveau_connector * +nv04_encoder_get_connector(struct nouveau_encoder *nv_encoder); /* nv04_crtc.c */ int nv04_crtc_create(struct drm_device *, int index); diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c index b701a4d8fe7608b86b71b374a20df93551c018c3..3ba7b59580d59efeab05d316d6eb0913e23716b7 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c @@ -172,7 +172,7 @@ static void nv04_tv_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", - nouveau_encoder_connector_get(nv_encoder)->base.name, + nv04_encoder_get_connector(nv_encoder)->base.name, nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index 3a9489ed6544f0be5763ad55d0d89d5dc9e88f8c..be28e7bd7490324c42c4551ba4d1ff835e1d0b46 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -599,7 +599,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n", - nouveau_encoder_connector_get(nv_encoder)->base.name, + nv04_encoder_get_connector(nv_encoder)->base.name, nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c index ad1f09a143aa48711470a8da2ef7c630755996a2..248edf69e1683170f2bab46941e4b539d79fa6a4 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c @@ -50,7 +50,10 @@ core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy) interlock[NV50_DISP_INTERLOCK_OVLY] | NVDEF(NV507D, UPDATE, NOT_DRIVER_FRIENDLY, FALSE) | 
NVDEF(NV507D, UPDATE, NOT_DRIVER_UNFRIENDLY, FALSE) | - NVDEF(NV507D, UPDATE, INHIBIT_INTERRUPTS, FALSE)); + NVDEF(NV507D, UPDATE, INHIBIT_INTERRUPTS, FALSE), + + SET_NOTIFIER_CONTROL, + NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE)); return PUSH_KICK(push); } diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c index 9afe9a87bde0c6963caa0d916ee48c0fc7aef0b7..814e5bd97446026fa2e663b6427426b014b69729 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c @@ -6,7 +6,7 @@ #include "disp.h" #include "head.h" -#include +#include #include diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 9dfb577f148a7da626137a8624d8f6b908ca7917..852e1b56f3a7cbd7f806982a3aa9057a2bb51f0a 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -257,6 +257,12 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, dmac->push->end = dmac->push->bgn; dmac->max = 0x1000/4 - 1; + /* EVO channels are affected by a HW bug where the last 12 DWORDs + * of the push buffer aren't able to be used safely. + */ + if (disp->oclass < GV100_DISP) + dmac->max -= 12; + args->pushbuf = nvif_handle(&dmac->_push.mem.object); ret = nv50_chan_create(device, disp, oclass, head, data, size, @@ -411,6 +417,40 @@ nv50_outp_atomic_check(struct drm_encoder *encoder, return 0; } +struct nouveau_connector * +nv50_outp_get_new_connector(struct nouveau_encoder *outp, + struct drm_atomic_state *state) +{ + struct drm_connector *connector; + struct drm_connector_state *connector_state; + struct drm_encoder *encoder = to_drm_encoder(outp); + int i; + + for_each_new_connector_in_state(state, connector, connector_state, i) { + if (connector_state->best_encoder == encoder) + return nouveau_connector(connector); + } + + return NULL; +} + +struct nouveau_connector * +nv50_outp_get_old_connector(struct nouveau_encoder *outp, + struct drm_atomic_state *state) +{ + struct drm_connector *connector; + struct drm_connector_state *connector_state; + struct drm_encoder *encoder = to_drm_encoder(outp); + int i; + + for_each_old_connector_in_state(state, connector, connector_state, i) { + if (connector_state->best_encoder == encoder) + return nouveau_connector(connector); + } + + return NULL; +} + /****************************************************************************** * DAC *****************************************************************************/ @@ -552,16 +592,31 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id, struct nouveau_drm *drm = nouveau_drm(drm_dev); struct drm_encoder *encoder; struct nouveau_encoder *nv_encoder; - struct nouveau_connector *nv_connector; + struct drm_connector *connector; struct nouveau_crtc *nv_crtc; + struct drm_connector_list_iter conn_iter; int ret = 0; *enabled = false; + drm_for_each_encoder(encoder, drm->dev) { + struct nouveau_connector *nv_connector = NULL; + nv_encoder = nouveau_encoder(encoder); - nv_connector = nouveau_encoder_connector_get(nv_encoder); + + drm_connector_list_iter_begin(drm_dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->state->best_encoder == encoder) { + nv_connector = nouveau_connector(connector); + break; + } + } + drm_connector_list_iter_end(&conn_iter); + if (!nv_connector) + continue; + nv_crtc = nouveau_crtc(encoder->crtc); - if (!nv_connector || !nv_crtc || nv_encoder->or != port || + if (!nv_crtc || 
nv_encoder->or != port || nv_crtc->index != dev_id) continue; *enabled = nv_encoder->audio; @@ -572,6 +627,7 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id, } break; } + return ret; } @@ -665,7 +721,8 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc) } static void -nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode) +nv50_audio_enable(struct drm_encoder *encoder, struct drm_atomic_state *state, + struct drm_display_mode *mode) { struct nouveau_drm *drm = nouveau_drm(encoder->dev); struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); @@ -686,7 +743,7 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode) (0x0100 << nv_crtc->index), }; - nv_connector = nouveau_encoder_connector_get(nv_encoder); + nv_connector = nv50_outp_get_new_connector(nv_encoder, state); if (!drm_detect_monitor_audio(nv_connector->edid)) return; @@ -723,7 +780,8 @@ nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc) } static void -nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode) +nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state, + struct drm_display_mode *mode) { struct nouveau_drm *drm = nouveau_drm(encoder->dev); struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); @@ -752,7 +810,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode) int ret; int size; - nv_connector = nouveau_encoder_connector_get(nv_encoder); + nv_connector = nv50_outp_get_new_connector(nv_encoder, state); if (!drm_detect_hdmi_monitor(nv_connector->edid)) return; @@ -798,7 +856,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode) + args.pwr.vendor_infoframe_length; nvif_mthd(&disp->disp->object, 0, &args, size); - nv50_audio_enable(encoder, mode); + nv50_audio_enable(encoder, state, mode); /* If SCDC is supported by the downstream monitor, update * divider / scrambling settings to what we programmed above. @@ -827,16 +885,6 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode) #define nv50_mstc(p) container_of((p), struct nv50_mstc, connector) #define nv50_msto(p) container_of((p), struct nv50_msto, encoder) -struct nv50_mstm { - struct nouveau_encoder *outp; - - struct drm_dp_mst_topology_mgr mgr; - - bool modified; - bool disabled; - int links; -}; - struct nv50_mstc { struct nv50_mstm *mstm; struct drm_dp_mst_port *port; @@ -1216,7 +1264,10 @@ nv50_mstc_detect(struct drm_connector *connector, ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr, mstc->port); + if (ret != connector_status_connected) + goto out; +out: pm_runtime_mark_last_busy(connector->dev->dev); pm_runtime_put_autosuspend(connector->dev->dev); return ret; @@ -1365,41 +1416,51 @@ nv50_mstm = { .add_connector = nv50_mstm_add_connector, }; -void -nv50_mstm_service(struct nv50_mstm *mstm) +bool +nv50_mstm_service(struct nouveau_drm *drm, + struct nouveau_connector *nv_connector, + struct nv50_mstm *mstm) { - struct drm_dp_aux *aux = mstm ? 
mstm->mgr.aux : NULL; - bool handled = true; - int ret; + struct drm_dp_aux *aux = &nv_connector->aux; + bool handled = true, ret = true; + int rc; u8 esi[8] = {}; - if (!aux) - return; - while (handled) { - ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8); - if (ret != 8) { - drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); - return; + rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8); + if (rc != 8) { + ret = false; + break; } drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled); if (!handled) break; - drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3); + rc = drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], + 3); + if (rc != 3) { + ret = false; + break; + } } + + if (!ret) + NV_DEBUG(drm, "Failed to handle ESI on %s: %d\n", + nv_connector->base.name, rc); + + return ret; } void nv50_mstm_remove(struct nv50_mstm *mstm) { - if (mstm) - drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); + mstm->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); } static int -nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state) +nv50_mstm_enable(struct nv50_mstm *mstm, int state) { struct nouveau_encoder *outp = mstm->outp; struct { @@ -1414,106 +1475,85 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state) }; struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev); struct nvif_object *disp = &drm->display->disp.object; - int ret; - - if (dpcd >= 0x12) { - /* Even if we're enabling MST, start with disabling the - * branching unit to clear any sink-side MST topology state - * that wasn't set by us - */ - ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0); - if (ret < 0) - return ret; - - if (state) { - /* Now, start initializing */ - ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, - DP_MST_EN); - if (ret < 0) - return ret; - } - } return nvif_mthd(disp, 0, &args, sizeof(args)); } int -nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow) +nv50_mstm_detect(struct nouveau_encoder *outp) { + struct nv50_mstm *mstm = outp->dp.mstm; struct drm_dp_aux *aux; int ret; - bool old_state, new_state; - u8 mstm_ctrl; - if (!mstm) + if (!mstm || !mstm->can_mst) return 0; - mutex_lock(&mstm->mgr.lock); - - old_state = mstm->mgr.mst_state; - new_state = old_state; aux = mstm->mgr.aux; - if (old_state) { - /* Just check that the MST hub is still as we expect it */ - ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl); - if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) { - DRM_DEBUG_KMS("Hub gone, disabling MST topology\n"); - new_state = false; - } - } else if (dpcd[0] >= 0x12) { - ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]); - if (ret < 0) - goto probe_error; - - if (!(dpcd[1] & DP_MST_CAP)) - dpcd[0] = 0x11; - else - new_state = allow; - } - - if (new_state == old_state) { - mutex_unlock(&mstm->mgr.lock); - return new_state; - } - - ret = nv50_mstm_enable(mstm, dpcd[0], new_state); - if (ret) - goto probe_error; - - mutex_unlock(&mstm->mgr.lock); + /* Clear any leftover MST state we didn't set ourselves by first + * disabling MST if it was already enabled + */ + ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0); + if (ret < 0) + return ret; - ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state); + /* And start enabling */ + ret = nv50_mstm_enable(mstm, true); if (ret) - return nv50_mstm_enable(mstm, dpcd[0], 0); + return ret; - return new_state; + ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, true); + if (ret) { + nv50_mstm_enable(mstm, false); + return ret; + } -probe_error: - mutex_unlock(&mstm->mgr.lock); - return ret; + 
mstm->is_mst = true; + return 1; } static void -nv50_mstm_fini(struct nv50_mstm *mstm) +nv50_mstm_fini(struct nouveau_encoder *outp) { - if (mstm && mstm->mgr.mst_state) + struct nv50_mstm *mstm = outp->dp.mstm; + + if (!mstm) + return; + + /* Don't change the MST state of this connector until we've finished + * resuming, since we can't safely grab hpd_irq_lock in our resume + * path to protect mstm->is_mst without potentially deadlocking + */ + mutex_lock(&outp->dp.hpd_irq_lock); + mstm->suspended = true; + mutex_unlock(&outp->dp.hpd_irq_lock); + + if (mstm->is_mst) drm_dp_mst_topology_mgr_suspend(&mstm->mgr); } static void -nv50_mstm_init(struct nv50_mstm *mstm, bool runtime) +nv50_mstm_init(struct nouveau_encoder *outp, bool runtime) { - int ret; + struct nv50_mstm *mstm = outp->dp.mstm; + int ret = 0; - if (!mstm || !mstm->mgr.mst_state) + if (!mstm) return; - ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime); - if (ret == -1) { - drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); - drm_kms_helper_hotplug_event(mstm->mgr.dev); + if (mstm->is_mst) { + ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime); + if (ret == -1) + nv50_mstm_remove(mstm); } + + mutex_lock(&outp->dp.hpd_irq_lock); + mstm->suspended = false; + mutex_unlock(&outp->dp.hpd_irq_lock); + + if (ret == -1) + drm_kms_helper_hotplug_event(mstm->mgr.dev); } static void @@ -1535,17 +1575,6 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max, struct drm_device *dev = outp->base.base.dev; struct nv50_mstm *mstm; int ret; - u8 dpcd; - - /* This is a workaround for some monitors not functioning - * correctly in MST mode on initial module load. I think - * some bad interaction with the VBIOS may be responsible. - * - * A good ol' off and on again seems to work here ;) - */ - ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd); - if (ret >= 0 && dpcd >= 0x12) - drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0); if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL))) return -ENOMEM; @@ -1584,23 +1613,27 @@ nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head, } static void -nv50_sor_disable(struct drm_encoder *encoder) +nv50_sor_disable(struct drm_encoder *encoder, + struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc); + struct nouveau_connector *nv_connector = + nv50_outp_get_old_connector(nv_encoder, state); nv_encoder->crtc = NULL; if (nv_crtc) { - struct nvkm_i2c_aux *aux = nv_encoder->aux; + struct drm_dp_aux *aux = &nv_connector->aux; u8 pwr; - if (aux) { - int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1); + if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { + int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr); + if (ret == 0) { pwr &= ~DP_SET_POWER_MASK; pwr |= DP_SET_POWER_D3; - nvkm_wraux(aux, DP_SET_POWER, &pwr, 1); + drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr); } } @@ -1612,7 +1645,8 @@ nv50_sor_disable(struct drm_encoder *encoder) } static void -nv50_sor_enable(struct drm_encoder *encoder) +nv50_sor_enable(struct drm_encoder *encoder, + struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); @@ -1636,7 +1670,7 @@ nv50_sor_enable(struct drm_encoder *encoder) u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM; u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT; - nv_connector = nouveau_encoder_connector_get(nv_encoder); + nv_connector = nv50_outp_get_new_connector(nv_encoder, 
state); nv_encoder->crtc = encoder->crtc; if ((disp->disp->object.oclass == GT214_DISP || @@ -1663,7 +1697,7 @@ nv50_sor_enable(struct drm_encoder *encoder) proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B; } - nv50_hdmi_enable(&nv_encoder->base.base, mode); + nv50_hdmi_enable(&nv_encoder->base.base, state, mode); break; case DCB_OUTPUT_LVDS: proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM; @@ -1704,7 +1738,7 @@ nv50_sor_enable(struct drm_encoder *encoder) else proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B; - nv50_audio_enable(encoder, mode); + nv50_audio_enable(encoder, state, mode); break; default: BUG(); @@ -1717,8 +1751,8 @@ nv50_sor_enable(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs nv50_sor_help = { .atomic_check = nv50_outp_atomic_check, - .enable = nv50_sor_enable, - .disable = nv50_sor_disable, + .atomic_enable = nv50_sor_enable, + .atomic_disable = nv50_sor_disable, }; static void @@ -1727,6 +1761,10 @@ nv50_sor_destroy(struct drm_encoder *encoder) struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); nv50_mstm_del(&nv_encoder->dp.mstm); drm_encoder_cleanup(encoder); + + if (nv_encoder->dcb->type == DCB_OUTPUT_DP) + mutex_destroy(&nv_encoder->dp.hpd_irq_lock); + kfree(encoder); } @@ -1786,6 +1824,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, dcbe->i2c_index); + mutex_init(&nv_encoder->dp.hpd_irq_lock); + if (aux) { if (disp->disp->object.oclass < GF110_DISP) { /* HW has no support for address-only @@ -2077,7 +2117,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) outp->clr.mask, outp->set.mask); if (outp->clr.mask) { - help->disable(encoder); + help->atomic_disable(encoder, state); interlock[NV50_DISP_INTERLOCK_CORE] |= 1; if (outp->flush_disable) { nv50_disp_atomic_commit_wndw(state, interlock); @@ -2116,7 +2156,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) outp->set.mask, outp->clr.mask); if (outp->set.mask) { - help->enable(encoder); + help->atomic_enable(encoder, state); interlock[NV50_DISP_INTERLOCK_CORE] = 1; } @@ -2484,9 +2524,9 @@ nv50_disp_func = { *****************************************************************************/ static void -nv50_display_fini(struct drm_device *dev, bool suspend) +nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend) { - struct nouveau_encoder *nv_encoder; + struct nouveau_drm *drm = nouveau_drm(dev); struct drm_encoder *encoder; struct drm_plane *plane; @@ -2498,11 +2538,12 @@ nv50_display_fini(struct drm_device *dev, bool suspend) } list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) { - nv_encoder = nouveau_encoder(encoder); - nv50_mstm_fini(nv_encoder->dp.mstm); - } + if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) + nv50_mstm_fini(nouveau_encoder(encoder)); } + + if (!runtime) + cancel_work_sync(&drm->hpd_work); } static int @@ -2519,7 +2560,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime) if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - nv50_mstm_init(nv_encoder->dp.mstm, runtime); + nv50_mstm_init(nv_encoder, runtime); } } diff --git a/drivers/gpu/drm/nouveau/include/nvif/push507c.h b/drivers/gpu/drm/nouveau/include/nvif/push507c.h index 889467f13fd9c71c0dd8145faf7770113d509966..7917bead4845de6795c117016896dd5bb74edc7d 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/push507c.h +++ 
b/drivers/gpu/drm/nouveau/include/nvif/push507c.h @@ -20,6 +20,6 @@ PUSH_ASSERT(!((o) & ~DRF_SMASK(NV507C_DMA_JUMP_OFFSET)), "offset"); \ PUSH_DATA__((p), NVDEF(NV507C, DMA, OPCODE, JUMP) | \ NVVAL(NV507C, DMA, JUMP_OFFSET, (o) >> 2), \ - "jump 0x%08x - %s", (u32)(o), __func__); \ + " jump 0x%08x - %s", (u32)(o), __func__); \ } while(0) #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 7674025a4bfe889e2087d1729130d863bb06d7be..49dd0cbc332ffa0107d9ba9e45867b33dc69ed41 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -391,20 +391,6 @@ find_encoder(struct drm_connector *connector, int type) return NULL; } -struct nouveau_connector * -nouveau_encoder_connector_get(struct nouveau_encoder *encoder) -{ - struct drm_device *dev = to_drm_encoder(encoder)->dev; - struct drm_connector *drm_connector; - - list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) { - if (drm_connector->encoder == to_drm_encoder(encoder)) - return nouveau_connector(drm_connector); - } - - return NULL; -} - static void nouveau_connector_destroy(struct drm_connector *connector) { @@ -435,7 +421,8 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) switch (nv_encoder->dcb->type) { case DCB_OUTPUT_DP: - ret = nouveau_dp_detect(nv_encoder); + ret = nouveau_dp_detect(nouveau_connector(connector), + nv_encoder); if (ret == NOUVEAU_DP_MST) return NULL; else if (ret == NOUVEAU_DP_SST) @@ -541,6 +528,17 @@ nouveau_connector_set_encoder(struct drm_connector *connector, } } +static void +nouveau_connector_set_edid(struct nouveau_connector *nv_connector, + struct edid *edid) +{ + struct edid *old_edid = nv_connector->edid; + + drm_connector_update_edid_property(&nv_connector->base, edid); + kfree(old_edid); + nv_connector->edid = edid; +} + static enum drm_connector_status nouveau_connector_detect(struct drm_connector *connector, bool force) { @@ -554,13 +552,6 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) int ret; enum drm_connector_status conn_status = connector_status_disconnected; - /* Cleanup the previous EDID block. */ - if (nv_connector->edid) { - drm_connector_update_edid_property(connector, NULL); - kfree(nv_connector->edid); - nv_connector->edid = NULL; - } - /* Outputs are only polled while runtime active, so resuming the * device here is unnecessary (and would deadlock upon runtime suspend * because it waits for polling to finish). 
We do however, want to @@ -573,22 +564,23 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) ret = pm_runtime_get_sync(dev->dev); if (ret < 0 && ret != -EACCES) { pm_runtime_put_autosuspend(dev->dev); + nouveau_connector_set_edid(nv_connector, NULL); return conn_status; } } nv_encoder = nouveau_connector_ddc_detect(connector); if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { + struct edid *new_edid; + if ((vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) && nv_connector->type == DCB_CONNECTOR_LVDS) - nv_connector->edid = drm_get_edid_switcheroo(connector, - i2c); + new_edid = drm_get_edid_switcheroo(connector, i2c); else - nv_connector->edid = drm_get_edid(connector, i2c); + new_edid = drm_get_edid(connector, i2c); - drm_connector_update_edid_property(connector, - nv_connector->edid); + nouveau_connector_set_edid(nv_connector, new_edid); if (!nv_connector->edid) { NV_ERROR(drm, "DDC responded, but no EDID for %s\n", connector->name); @@ -622,6 +614,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) conn_status = connector_status_connected; drm_dp_cec_set_edid(&nv_connector->aux, nv_connector->edid); goto out; + } else { + nouveau_connector_set_edid(nv_connector, NULL); } nv_encoder = nouveau_connector_of_detect(connector); @@ -646,10 +640,11 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) conn_status = connector_status_connected; goto out; } - } out: + if (!nv_connector->edid) + drm_dp_cec_unset_edid(&nv_connector->aux); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); @@ -664,18 +659,12 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_encoder *nv_encoder = NULL; + struct edid *edid = NULL; enum drm_connector_status status = connector_status_disconnected; - /* Cleanup the previous EDID block. */ - if (nv_connector->edid) { - drm_connector_update_edid_property(connector, NULL); - kfree(nv_connector->edid); - nv_connector->edid = NULL; - } - nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS); if (!nv_encoder) - return connector_status_disconnected; + goto out; /* Try retrieving EDID via DDC */ if (!drm->vbios.fp_no_ddc) { @@ -694,7 +683,8 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) * valid - it's not (rh#613284) */ if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) { - if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) { + edid = nouveau_acpi_edid(dev, connector); + if (edid) { status = connector_status_connected; goto out; } @@ -714,12 +704,10 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) * stored for the panel stored in them. 
*/ if (!drm->vbios.fp_no_ddc) { - struct edid *edid = - (struct edid *)nouveau_bios_embedded_edid(dev); + edid = (struct edid *)nouveau_bios_embedded_edid(dev); if (edid) { - nv_connector->edid = - kmemdup(edid, EDID_LENGTH, GFP_KERNEL); - if (nv_connector->edid) + edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL); + if (edid) status = connector_status_connected; } } @@ -732,7 +720,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) status = connector_status_unknown; #endif - drm_connector_update_edid_property(connector, nv_connector->edid); + nouveau_connector_set_edid(nv_connector, edid); nouveau_connector_set_encoder(connector, nv_encoder); return status; } @@ -1150,59 +1138,39 @@ nouveau_connector_funcs_lvds = { .early_unregister = nouveau_connector_early_unregister, }; +void +nouveau_connector_hpd(struct drm_connector *connector) +{ + struct nouveau_drm *drm = nouveau_drm(connector->dev); + u32 mask = drm_connector_mask(connector); + + mutex_lock(&drm->hpd_lock); + if (!(drm->hpd_pending & mask)) { + drm->hpd_pending |= mask; + schedule_work(&drm->hpd_work); + } + mutex_unlock(&drm->hpd_lock); +} + static int nouveau_connector_hotplug(struct nvif_notify *notify) { struct nouveau_connector *nv_connector = container_of(notify, typeof(*nv_connector), hpd); struct drm_connector *connector = &nv_connector->base; - struct nouveau_drm *drm = nouveau_drm(connector->dev); + struct drm_device *dev = connector->dev; + struct nouveau_drm *drm = nouveau_drm(dev); const struct nvif_notify_conn_rep_v0 *rep = notify->data; - const char *name = connector->name; - struct nouveau_encoder *nv_encoder; - int ret; bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { - NV_DEBUG(drm, "service %s\n", name); - drm_dp_cec_irq(&nv_connector->aux); - if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) - nv50_mstm_service(nv_encoder->dp.mstm); - + nouveau_dp_irq(drm, nv_connector); return NVIF_NOTIFY_KEEP; } - ret = pm_runtime_get(drm->dev->dev); - if (ret == 0) { - /* We can't block here if there's a pending PM request - * running, as we'll deadlock nouveau_display_fini() when it - * calls nvif_put() on our nvif_notify struct. So, simply - * defer the hotplug event until the device finishes resuming - */ - NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n", - name); - schedule_work(&drm->hpd_work); - - pm_runtime_put_noidle(drm->dev->dev); - return NVIF_NOTIFY_KEEP; - } else if (ret != 1 && ret != -EACCES) { - NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n", - name, ret); - return NVIF_NOTIFY_DROP; - } - - if (!plugged) - drm_dp_cec_unset_edid(&nv_connector->aux); - NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); - if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) { - if (!plugged) - nv50_mstm_remove(nv_encoder->dp.mstm); - } - - drm_helper_hpd_irq_event(connector->dev); + NV_DEBUG(drm, "%splugged %s\n", plugged ? 
"" : "un", connector->name); + nouveau_connector_hpd(connector); - pm_runtime_mark_last_busy(drm->dev->dev); - pm_runtime_put_autosuspend(drm->dev->dev); return NVIF_NOTIFY_KEEP; } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index d6de5cb8e22383b86e1b55b4ad8d820df106ddc0..d0b859c4a80ea64a520a37be94b5b71347a0ee49 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -187,6 +187,7 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) struct drm_connector * nouveau_connector_create(struct drm_device *, const struct dcb_output *); +void nouveau_connector_hpd(struct drm_connector *connector); extern int nouveau_tv_disable; extern int nouveau_ignorelid; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 5f31b11ac2e7e0cb13b6d1ea3c383d2f3ae8e7b9..bceb48a2dfca6ba30c89fe28716250d7284c7b6d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -457,16 +457,70 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = { } \ } while(0) +void +nouveau_display_hpd_resume(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + mutex_lock(&drm->hpd_lock); + drm->hpd_pending = ~0; + mutex_unlock(&drm->hpd_lock); + + schedule_work(&drm->hpd_work); +} + static void nouveau_display_hpd_work(struct work_struct *work) { struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work); + struct drm_device *dev = drm->dev; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + u32 pending; + bool changed = false; + + pm_runtime_get_sync(dev->dev); - pm_runtime_get_sync(drm->dev->dev); + mutex_lock(&drm->hpd_lock); + pending = drm->hpd_pending; + drm->hpd_pending = 0; + mutex_unlock(&drm->hpd_lock); - drm_helper_hpd_irq_event(drm->dev); + /* Nothing to do, exit early without updating the last busy counter */ + if (!pending) + goto noop; + + mutex_lock(&dev->mode_config.mutex); + drm_connector_list_iter_begin(dev, &conn_iter); + + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { + enum drm_connector_status old_status = connector->status; + u64 old_epoch_counter = connector->epoch_counter; + + if (!(pending & drm_connector_mask(connector))) + continue; + + connector->status = drm_helper_probe_detect(connector, NULL, + false); + if (old_epoch_counter == connector->epoch_counter) + continue; + + changed = true; + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n", + connector->base.id, connector->name, + drm_get_connector_status_name(old_status), + drm_get_connector_status_name(connector->status), + old_epoch_counter, connector->epoch_counter); + } + + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); pm_runtime_mark_last_busy(drm->dev->dev); +noop: pm_runtime_put_sync(drm->dev->dev); } @@ -490,12 +544,11 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val, */ pm_runtime_put_autosuspend(drm->dev->dev); } else if (ret == 0) { - /* This may be the only indication we receive - * of a connector hotplug on a runtime - * suspended GPU, schedule hpd_work to check. 
+ /* We've started resuming the GPU already, so + * it will handle scheduling a full reprobe + * itself */ NV_DEBUG(drm, "ACPI requested connector reprobe\n"); - schedule_work(&drm->hpd_work); pm_runtime_put_noidle(drm->dev->dev); } else { NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n", @@ -569,7 +622,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime) cancel_work_sync(&drm->hpd_work); drm_kms_helper_poll_disable(dev); - disp->fini(dev, suspend); + disp->fini(dev, runtime, suspend); } static void @@ -686,6 +739,7 @@ nouveau_display_create(struct drm_device *dev) } INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work); + mutex_init(&drm->hpd_lock); #ifdef CONFIG_ACPI drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy; register_acpi_notifier(&drm->acpi_nb); @@ -705,9 +759,10 @@ void nouveau_display_destroy(struct drm_device *dev) { struct nouveau_display *disp = nouveau_display(dev); + struct nouveau_drm *drm = nouveau_drm(dev); #ifdef CONFIG_ACPI - unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb); + unregister_acpi_notifier(&drm->acpi_nb); #endif drm_kms_helper_poll_fini(dev); @@ -719,6 +774,7 @@ nouveau_display_destroy(struct drm_device *dev) nvif_disp_dtor(&disp->disp); nouveau_drm(dev)->display = NULL; + mutex_destroy(&drm->hpd_lock); kfree(disp); } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 6e0d900441d69bf176939dd7acad17d3fad082b3..616c434270594a8cfc46de676b1250b0cb9aacd8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -18,7 +18,7 @@ struct nouveau_display { void *priv; void (*dtor)(struct drm_device *); int (*init)(struct drm_device *, bool resume, bool runtime); - void (*fini)(struct drm_device *, bool suspend); + void (*fini)(struct drm_device *, bool suspend, bool runtime); struct nvif_disp disp; @@ -45,6 +45,7 @@ nouveau_display(struct drm_device *dev) int nouveau_display_create(struct drm_device *dev); void nouveau_display_destroy(struct drm_device *dev); int nouveau_display_init(struct drm_device *dev, bool resume, bool runtime); +void nouveau_display_hpd_resume(struct drm_device *dev); void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime); int nouveau_display_suspend(struct drm_device *dev, bool runtime); void nouveau_display_resume(struct drm_device *dev, bool runtime); diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 8a0f7994e1aeb623c17375b8d2ecbca9ac4915a4..810bf69565683f6d103b744183f87c561c5d8358 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -36,50 +36,123 @@ MODULE_PARM_DESC(mst, "Enable DisplayPort multi-stream (default: enabled)"); static int nouveau_mst = 1; module_param_named(mst, nouveau_mst, int, 0400); -static void -nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_aux *aux, u8 *dpcd) +static bool +nouveau_dp_has_sink_count(struct drm_connector *connector, + struct nouveau_encoder *outp) { - struct nouveau_drm *drm = nouveau_drm(dev); - u8 buf[3]; - - if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) - return; - - if (!nvkm_rdaux(aux, DP_SINK_OUI, buf, 3)) - NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n", - buf[0], buf[1], buf[2]); - - if (!nvkm_rdaux(aux, DP_BRANCH_OUI, buf, 3)) - NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n", - buf[0], buf[1], buf[2]); + return drm_dp_read_sink_count_cap(connector, outp->dp.dpcd, &outp->dp.desc); +} +static enum drm_connector_status 
+nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector, + struct nouveau_encoder *outp) +{ + struct drm_connector *connector = &nv_connector->base; + struct drm_dp_aux *aux = &nv_connector->aux; + struct nv50_mstm *mstm = NULL; + enum drm_connector_status status = connector_status_disconnected; + int ret; + u8 *dpcd = outp->dp.dpcd; + + ret = drm_dp_read_dpcd_caps(aux, dpcd); + if (ret < 0) + goto out; + + ret = drm_dp_read_desc(aux, &outp->dp.desc, drm_dp_is_branch(dpcd)); + if (ret < 0) + goto out; + + if (nouveau_mst) { + mstm = outp->dp.mstm; + if (mstm) + mstm->can_mst = drm_dp_read_mst_cap(aux, dpcd); + } + + if (nouveau_dp_has_sink_count(connector, outp)) { + ret = drm_dp_read_sink_count(aux); + if (ret < 0) + goto out; + + outp->dp.sink_count = ret; + + /* + * Dongle connected, but no display. Don't bother reading + * downstream port info + */ + if (!outp->dp.sink_count) + return connector_status_disconnected; + } + + ret = drm_dp_read_downstream_info(aux, dpcd, + outp->dp.downstream_ports); + if (ret < 0) + goto out; + + status = connector_status_connected; +out: + if (status != connector_status_connected) { + /* Clear any cached info */ + outp->dp.sink_count = 0; + } + return status; } int -nouveau_dp_detect(struct nouveau_encoder *nv_encoder) +nouveau_dp_detect(struct nouveau_connector *nv_connector, + struct nouveau_encoder *nv_encoder) { struct drm_device *dev = nv_encoder->base.base.dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_i2c_aux *aux; - u8 dpcd[8]; - int ret; - - aux = nv_encoder->aux; - if (!aux) - return -ENODEV; - - ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, sizeof(dpcd)); - if (ret) - return ret; + struct drm_connector *connector = &nv_connector->base; + struct nv50_mstm *mstm = nv_encoder->dp.mstm; + enum drm_connector_status status; + u8 *dpcd = nv_encoder->dp.dpcd; + int ret = NOUVEAU_DP_NONE; + + /* If we've already read the DPCD on an eDP device, we don't need to + * reread it as it won't change + */ + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && + dpcd[DP_DPCD_REV] != 0) + return NOUVEAU_DP_SST; - nv_encoder->dp.link_bw = 27000 * dpcd[1]; - nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK; + mutex_lock(&nv_encoder->dp.hpd_irq_lock); + if (mstm) { + /* If we're not ready to handle MST state changes yet, just + * report the last status of the connector. We'll reprobe it + * once we've resumed. 
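/*
 * Editor's note: the probe order in nouveau_dp_probe_dpcd() matters --
 * base DPCD caps come first (everything else is interpreted against them),
 * then the branch/sink descriptor, then the sink count: a branch device
 * ("dongle") reporting zero sinks has no display attached, so the
 * downstream-port read is skipped. Minimal standalone illustration using
 * the same DRM helpers as the hunk; this is not the driver's code:
 */
static int probe_dp_sink(struct drm_dp_aux *aux,
			 struct drm_connector *connector)
{
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	struct drm_dp_desc desc;
	int ret;

	ret = drm_dp_read_dpcd_caps(aux, dpcd);
	if (ret < 0)
		return ret;

	ret = drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd));
	if (ret < 0)
		return ret;

	if (drm_dp_read_sink_count_cap(connector, dpcd, &desc)) {
		ret = drm_dp_read_sink_count(aux);
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -ENODEV;	/* dongle present, no display behind it */
	}
	return 0;
}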
+ */ + if (mstm->suspended) { + if (mstm->is_mst) + ret = NOUVEAU_DP_MST; + else if (connector->status == + connector_status_connected) + ret = NOUVEAU_DP_SST; + + goto out; + } + } + + status = nouveau_dp_probe_dpcd(nv_connector, nv_encoder); + if (status == connector_status_disconnected) + goto out; + + /* If we're in MST mode, we're done here */ + if (mstm && mstm->can_mst && mstm->is_mst) { + ret = NOUVEAU_DP_MST; + goto out; + } + + nv_encoder->dp.link_bw = 27000 * dpcd[DP_MAX_LINK_RATE]; + nv_encoder->dp.link_nr = + dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; NV_DEBUG(drm, "display: %dx%d dpcd 0x%02x\n", - nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]); + nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, + dpcd[DP_DPCD_REV]); NV_DEBUG(drm, "encoder: %dx%d\n", - nv_encoder->dcb->dpconf.link_nr, - nv_encoder->dcb->dpconf.link_bw); + nv_encoder->dcb->dpconf.link_nr, + nv_encoder->dcb->dpconf.link_bw); if (nv_encoder->dcb->dpconf.link_nr < nv_encoder->dp.link_nr) nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr; @@ -87,23 +160,68 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder) nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw; NV_DEBUG(drm, "maximum: %dx%d\n", - nv_encoder->dp.link_nr, nv_encoder->dp.link_bw); + nv_encoder->dp.link_nr, nv_encoder->dp.link_bw); + + if (mstm && mstm->can_mst) { + ret = nv50_mstm_detect(nv_encoder); + if (ret == 1) { + ret = NOUVEAU_DP_MST; + goto out; + } else if (ret != 0) { + goto out; + } + } + ret = NOUVEAU_DP_SST; + +out: + if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST) + nv50_mstm_remove(mstm); + + mutex_unlock(&nv_encoder->dp.hpd_irq_lock); + return ret; +} - nouveau_dp_probe_oui(dev, aux, dpcd); +void nouveau_dp_irq(struct nouveau_drm *drm, + struct nouveau_connector *nv_connector) +{ + struct drm_connector *connector = &nv_connector->base; + struct nouveau_encoder *outp = find_encoder(connector, DCB_OUTPUT_DP); + struct nv50_mstm *mstm; + int ret; + bool send_hpd = false; - ret = nv50_mstm_detect(nv_encoder->dp.mstm, dpcd, nouveau_mst); - if (ret == 1) - return NOUVEAU_DP_MST; - if (ret == 0) - return NOUVEAU_DP_SST; - return ret; + if (!outp) + return; + + mstm = outp->dp.mstm; + NV_DEBUG(drm, "service %s\n", connector->name); + + mutex_lock(&outp->dp.hpd_irq_lock); + + if (mstm && mstm->is_mst) { + if (!nv50_mstm_service(drm, nv_connector, mstm)) + send_hpd = true; + } else { + drm_dp_cec_irq(&nv_connector->aux); + + if (nouveau_dp_has_sink_count(connector, outp)) { + ret = drm_dp_read_sink_count(&nv_connector->aux); + if (ret != outp->dp.sink_count) + send_hpd = true; + if (ret >= 0) + outp->dp.sink_count = ret; + } + } + + mutex_unlock(&outp->dp.hpd_irq_lock); + + if (send_hpd) + nouveau_connector_hpd(connector); } /* TODO: * - Use the minimum possible BPC here, once we add support for the max bpc * property. 
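/*
 * Editor's note (DP spec background, assumed rather than stated in the
 * diff): DP_MAX_LINK_RATE is encoded in multiples of 270 Mbit/s per lane
 * (0x06 = RBR 1.62 Gbit/s, 0x0a = HBR 2.7, 0x14 = HBR2 5.4). That is why
 * the driver stores link_bw = 27000 * dpcd[DP_MAX_LINK_RATE]: the
 * per-lane rate lands in 10 kbit/s units, e.g. 0x0a -> 270000.
 */
static inline u32 dp_rate_code_to_10kbps(u8 code)
{
	return 27000u * code;	/* 0x14 -> 540000, i.e. 5.4 Gbit/s per lane */
}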
- * - Validate the mode against downstream port caps (see - * drm_dp_downstream_max_clock()) * - Validate against the DP caps advertised by the GPU (we don't check these * yet) */ @@ -114,15 +232,19 @@ nv50_dp_mode_valid(struct drm_connector *connector, unsigned *out_clock) { const unsigned min_clock = 25000; - unsigned max_clock, clock; + unsigned max_clock, ds_clock, clock; enum drm_mode_status ret; if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace) return MODE_NO_INTERLACE; max_clock = outp->dp.link_nr * outp->dp.link_bw; - clock = mode->clock * (connector->display_info.bpc * 3) / 10; + ds_clock = drm_dp_downstream_max_clock(outp->dp.dpcd, + outp->dp.downstream_ports); + if (ds_clock) + max_clock = min(max_clock, ds_clock); + clock = mode->clock * (connector->display_info.bpc * 3) / 10; ret = nouveau_conn_mode_clock_valid(mode, min_clock, max_clock, &clock); if (out_clock) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 22d246acc5e527ea4019f3baa64de0e812405497..42fc5c813a9bbfd7195514af99eb20b6456854c0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -953,7 +953,7 @@ nouveau_pmops_resume(struct device *dev) ret = nouveau_do_resume(drm_dev, false); /* Monitors may have been connected / disconnected during suspend */ - schedule_work(&nouveau_drm(drm_dev)->hpd_work); + nouveau_display_hpd_resume(drm_dev); return ret; } @@ -1036,7 +1036,7 @@ nouveau_pmops_runtime_resume(struct device *dev) drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; /* Monitors may have been connected / disconnected during suspend */ - schedule_work(&nouveau_drm(drm_dev)->hpd_work); + nouveau_display_hpd_resume(drm_dev); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 26a2c1090045e50875060fdb4f5b1ec709b68d04..b8025507a9e4c9de9331979cf7f8a7a7bbed675a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -200,6 +200,8 @@ struct nouveau_drm { struct nvbios vbios; struct nouveau_display *display; struct work_struct hpd_work; + struct mutex hpd_lock; + u32 hpd_pending; struct work_struct fbcon_work; int fbcon_new_state; #ifdef CONFIG_ACPI diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h index a72c412ac8b14887959fd64f7f49074e50c3d79c..21937f1c7dd90e8ced809e081755a2898db770af 100644 --- a/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -33,6 +33,7 @@ #include #include "dispnv04/disp.h" struct nv50_head_atom; +struct nouveau_connector; #define NV_DPMS_CLEARED 0x80 @@ -64,6 +65,17 @@ struct nouveau_encoder { struct nv50_mstm *mstm; int link_nr; int link_bw; + + /* Protects DP state that needs to be accessed outside + * connector reprobing contexts + */ + struct mutex hpd_irq_lock; + + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + struct drm_dp_desc desc; + + u8 sink_count; } dp; }; @@ -77,6 +89,21 @@ struct nouveau_encoder { struct nv50_head_atom *, u8 proto, u8 depth); }; +struct nv50_mstm { + struct nouveau_encoder *outp; + + struct drm_dp_mst_topology_mgr mgr; + + /* Protected under nouveau_encoder->dp.hpd_irq_lock */ + bool can_mst; + bool is_mst; + bool suspended; + + bool modified; + bool disabled; + int links; +}; + struct nouveau_encoder * find_encoder(struct drm_connector *connector, int type); @@ -100,20 +127,29 @@ get_slave_funcs(struct drm_encoder *enc) /* 
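/*
 * Editor's note on the bandwidth check above: mode->clock is in kHz and
 * each pixel carries bpc * 3 bits, so mode->clock * (bpc * 3) is the
 * payload rate in kbit/s; dividing by 10 puts it in the same 10 kbit/s
 * units as link_bw (as I read it, the comparison is against the raw link
 * rate; 8b/10b coding overhead is not subtracted here). Worked numbers,
 * my arithmetic, with a matching sketch:
 *   1080p60, 8 bpc : 148500 * 24 / 10 = 356400
 *   HBR x 2 lanes  : 2 * 270000       = 540000  -> the mode fits
 * drm_dp_downstream_max_clock() then clamps max_clock further when a
 * branch device (e.g. a DP-to-HDMI dongle) advertises a lower limit.
 */
static bool dp_mode_fits(u32 pixel_khz, u32 bpc, u32 lanes,
			 u32 link_bw_10kbps)
{
	u32 need = pixel_khz * (bpc * 3) / 10;	/* payload, 10 kbit/s units */

	return need <= lanes * link_bw_10kbps;
}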
nouveau_dp.c */ enum nouveau_dp_status { + NOUVEAU_DP_NONE, NOUVEAU_DP_SST, NOUVEAU_DP_MST, }; -int nouveau_dp_detect(struct nouveau_encoder *); +int nouveau_dp_detect(struct nouveau_connector *, struct nouveau_encoder *); +void nouveau_dp_irq(struct nouveau_drm *drm, + struct nouveau_connector *nv_connector); enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *, struct nouveau_encoder *, const struct drm_display_mode *, unsigned *clock); struct nouveau_connector * -nouveau_encoder_connector_get(struct nouveau_encoder *encoder); - -int nv50_mstm_detect(struct nv50_mstm *, u8 dpcd[8], int allow); -void nv50_mstm_remove(struct nv50_mstm *); -void nv50_mstm_service(struct nv50_mstm *); +nv50_outp_get_new_connector(struct nouveau_encoder *outp, + struct drm_atomic_state *state); +struct nouveau_connector * +nv50_outp_get_old_connector(struct nouveau_encoder *outp, + struct drm_atomic_state *state); + +int nv50_mstm_detect(struct nouveau_encoder *encoder); +void nv50_mstm_remove(struct nv50_mstm *mstm); +bool nv50_mstm_service(struct nouveau_drm *drm, + struct nouveau_connector *nv_connector, + struct nv50_mstm *mstm); #endif /* __NOUVEAU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 0fa8f677b647cb0da444b5abdda45f5a32775427..5c027c81760f47861f04baa4f03d0620bece6eb5 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -597,7 +597,7 @@ static void venc_bridge_mode_set(struct drm_bridge *bridge, switch (venc_mode) { default: WARN_ON_ONCE(1); - /* Fall-through */ + fallthrough; case VENC_MODE_PAL: venc->config = &venc_config_pal_trm; break; diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 6d40914675dade9dddfee9f634907cc79e84fb41..328a4a74f534e0685babecfac49a3ee312d5fa12 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -451,11 +451,12 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc, if (omap_state->manually_updated) return; - spin_lock_irq(&crtc->dev->event_lock); drm_crtc_vblank_on(crtc); + ret = drm_crtc_vblank_get(crtc); WARN_ON(ret != 0); + spin_lock_irq(&crtc->dev->event_lock); omap_crtc_arm_event(crtc); spin_unlock_irq(&crtc->dev->event_lock); } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index fa79688013b7dc21934f6776764d1934fd188bd0..6063f3a153290d4976e17259655d9cee556c3182 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -26,6 +26,7 @@ #include #include +#include #include #include #include @@ -186,7 +187,7 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev) DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret); qxl_update_offset_props(qdev); - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); if (!drm_helper_hpd_irq_event(dev)) { /* notify that the monitor configuration changed, to adjust at the arbitrary resolution */ @@ -431,7 +432,7 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, clips, num_clips, inc, 0); out_lock_end: - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(fb->dev, ctx, ret); return 0; } diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index ba20c6f037198c2a41894481a19cd815b9d9a48c..886e9959496fe51f373a62f7ba580a662e635b4e 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -4856,7 +4856,7 @@ static void 
ci_request_link_speed_change_before_state_change(struct radeon_devic pi->force_pcie_gen = RADEON_PCIE_GEN2; if (current_link_speed == RADEON_PCIE_GEN2) break; - /* fall through */ + fallthrough; case RADEON_PCIE_GEN2: if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) break; diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 3b7ead5be5bfe1230df6a078c0f92e42615e6d99..73f67bf222e1648894f58224f1f72b6a2dc4016c 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -820,7 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ((idx_value >> 21) & 0xF)); return -EINVAL; } - /* Fall through. */ + fallthrough; case 6: track->cb[i].cpp = 4; break; @@ -971,7 +971,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, return -EINVAL; } /* The same rules apply as for DXT3/5. */ - /* Fall through. */ + fallthrough; case R300_TX_FORMAT_DXT3: case R300_TX_FORMAT_DXT5: track->textures[i].cpp = 1; diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 1d4c04e0a44949bf4f33178fc2626d57d747f915..50b89b6d9a6ce628411c8c8658fe7ba1bf5b9d5a 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -115,7 +115,7 @@ void r420_pipes_init(struct radeon_device *rdev) default: /* force to 1 pipe */ num_pipes = 1; - /* fall through */ + fallthrough; case 1: tmp = (0 << 1); break; diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 49e8266461f85222fc0550c60547127e0fe38fbd..390a9621604ae0f08e032fd9ef6849a2ca58e681 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -487,7 +487,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) return -EINVAL; } } - /* fall through */ + fallthrough; case V_0280A0_CLEAR_ENABLE: { uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]); @@ -1535,7 +1535,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, break; case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: is_array = true; - /* fall through */ + fallthrough; case V_038000_SQ_TEX_DIM_2D_MSAA: array_check.nsamples = 1 << llevel; llevel = 0; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index e0ae911ef427dd028ad3c606dae0f1d61d1c7b7a..7b69d6dfe44a37455ee73147f5f88d78fe409f17 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -933,7 +933,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, /* get matching reference and feedback divider */ *ref_div = min(max(den/post_div, 1u), ref_div_max); - *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den); + *fb_div = max(nom * *ref_div * post_div / den, 1u); /* limit fb divider to its maximum */ if (*fb_div > fb_div_max) { diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index f178ba3217151e985b6b5af00959fe9f08af178d..3808a753127bcb7c1eb861ea85bc85e09e0b7c12 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -72,8 +72,8 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev) { void *ptr; - ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size, - &rdev->gart.table_addr); + ptr = dma_alloc_coherent(&rdev->pdev->dev, rdev->gart.table_size, + &rdev->gart.table_addr, GFP_KERNEL); if (ptr == NULL) { return -ENOMEM; } @@ -85,7 +85,6 @@ int radeon_gart_table_ram_alloc(struct radeon_device 
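/*
 * Editor's note on the tree-wide comment-to-"fallthrough;" conversions in
 * these hunks: fallthrough is a pseudo-keyword from
 * <linux/compiler_attributes.h>, defined approximately as below. Unlike a
 * comment, the attribute survives preprocessing, so -Wimplicit-fallthrough
 * can flag every case transition that is *not* explicitly annotated.
 */
#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif

static int fallthrough_demo(int n)
{
	switch (n) {
	case 3:
		n++;		/* deliberately continue into the next case */
		fallthrough;
	case 4:
		return n;
	default:
		return 0;
	}
}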
*rdev) } #endif rdev->gart.ptr = ptr; - memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size); return 0; } @@ -110,9 +109,8 @@ void radeon_gart_table_ram_free(struct radeon_device *rdev) rdev->gart.table_size >> PAGE_SHIFT); } #endif - pci_free_consistent(rdev->pdev, rdev->gart.table_size, - (void *)rdev->gart.ptr, - rdev->gart.table_addr); + dma_free_coherent(&rdev->pdev->dev, rdev->gart.table_size, + (void *)rdev->gart.ptr, rdev->gart.table_addr); rdev->gart.ptr = NULL; rdev->gart.table_addr = 0; } diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 1ad5c3b86b640e67372d32033b7d9857166aad0f..57fb3eb3a4b45a0af9864625503cd49426cf05ef 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -454,7 +454,7 @@ static int radeon_uvd_validate_codec(struct radeon_cs_parser *p, if (p->rdev->family >= CHIP_PALM) return 0; - /* fall through */ + fallthrough; default: DRM_ERROR("UVD codec not supported by hardware %d!\n", stream_type); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index a167e1c36d2432719e1f3032d8e97577a074bc66..d1c73e9db889aee87b6af7dddb3b99b5f86c1511 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -5744,7 +5744,7 @@ static void si_request_link_speed_change_before_state_change(struct radeon_devic si_pi->force_pcie_gen = RADEON_PCIE_GEN2; if (current_link_speed == RADEON_PCIE_GEN2) break; - /* fall through */ + fallthrough; case RADEON_PCIE_GEN2: if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) break; diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index f858d8d063476becf391f79a7aef06e3fb48f30a..800721153d51a2a404c121834d20236d992c65d6 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -219,7 +219,7 @@ int uvd_v1_0_init(struct radeon_device *rdev) WREG32(RS_DQ_RD_RET_CONF, 0x3f); WREG32(MC_CONFIG, 0x1f); - /* fall through */ + fallthrough; case CHIP_RV670: case CHIP_RV635: diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c index a2ac25c11c90384ad73d377ec4ed314b2ada0467..e0d40ae67d54240e90e7a6499f2cb595bf70662f 100644 --- a/drivers/gpu/drm/savage/savage_state.c +++ b/drivers/gpu/drm/savage/savage_state.c @@ -306,7 +306,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv, case SAVAGE_PRIM_TRILIST_201: reorder = 1; prim = SAVAGE_PRIM_TRILIST; - /* fall through */ + fallthrough; case SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of vertices %u in TRILIST\n", @@ -444,7 +444,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, case SAVAGE_PRIM_TRILIST_201: reorder = 1; prim = SAVAGE_PRIM_TRILIST; - /* fall through */ + fallthrough; case SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of vertices %u in TRILIST\n", @@ -566,7 +566,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, case SAVAGE_PRIM_TRILIST_201: reorder = 1; prim = SAVAGE_PRIM_TRILIST; - /* fall through */ + fallthrough; case SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of indices %u in TRILIST\n", n); @@ -705,7 +705,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, case SAVAGE_PRIM_TRILIST_201: reorder = 1; prim = SAVAGE_PRIM_TRILIST; - /* fall through */ + fallthrough; case SAVAGE_PRIM_TRILIST: if (n % 3 != 0) { DRM_ERROR("wrong number of indices %u in TRILIST\n", n); @@ -1066,7 +1066,7 @@ int 
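/*
 * Editor's note: pci_alloc_consistent(pdev, size, &dma) was a thin legacy
 * wrapper around dma_alloc_coherent() (historically forcing GFP_ATOMIC);
 * the modern call takes the struct device and an explicit gfp_t. Since
 * v5.0, dma_alloc_coherent() also guarantees zeroed memory, which is why
 * the radeon hunk above can drop the follow-up memset(). Conversion
 * pattern, as a sketch:
 */
static void *gart_table_alloc(struct pci_dev *pdev, size_t size,
			      dma_addr_t *dma)
{
	/* returns zero-filled, coherent memory -- no memset() needed */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static void gart_table_free(struct pci_dev *pdev, size_t size,
			    void *ptr, dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, size, ptr, dma);
}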
savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ ret = -EINVAL; goto done; } - /* fall through */ + fallthrough; case SAVAGE_CMD_DMA_PRIM: case SAVAGE_CMD_VB_PRIM: if (!first_draw_cmd) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 96f763d888af596a7df80099e579abab3e4649ea..9a0d77a6801807129b457040540569a5730656f3 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -625,7 +625,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) return NULL; /* Kernel run queue has higher priority than normal run queue*/ - for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { entity = drm_sched_rq_select_entity(&sched->sched_rq[i]); if (entity) break; @@ -852,7 +852,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, sched->name = name; sched->timeout = timeout; sched->hang_limit = hang_limit; - for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++) + for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++) drm_sched_rq_init(sched, &sched->sched_rq[i]); init_waitqueue_head(&sched->wake_up_worker); diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 008f07923bbc9f385f28bf02d61e574f8284aa67..38a558768e5311318be452425b6bbadfb023e44c 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -850,13 +850,13 @@ static int hdmi_audio_configure(struct sti_hdmi *hdmi) switch (info->channels) { case 8: audio_cfg |= HDMI_AUD_CFG_CH78_VALID; - /* fall through */ + fallthrough; case 6: audio_cfg |= HDMI_AUD_CFG_CH56_VALID; - /* fall through */ + fallthrough; case 4: audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH; - /* fall through */ + fallthrough; case 2: audio_cfg |= HDMI_AUD_CFG_CH12_VALID; break; diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index f025534eb30ccc4b0bba6d45f92c3a842971850e..2f26f85ef538439c867dca2a21e23883ff1b07ce 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -589,8 +589,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine, /* We can't have an alpha plane at the lowest position */ if (!backend->quirks->supports_lowest_plane_alpha && - (plane_states[0]->fb->format->has_alpha || - (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))) + (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE)) return -EINVAL; for (i = 1; i < num_planes; i++) { @@ -995,7 +994,6 @@ static const struct sun4i_backend_quirks sun6i_backend_quirks = { static const struct sun4i_backend_quirks sun7i_backend_quirks = { .needs_output_muxing = true, - .supports_lowest_plane_alpha = true, }; static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = { diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 09ad8506a81bef69835fe13c3c247dcdcefdf705..eaaf5d70e35299412c1e026d61c06646ca635de6 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -195,7 +195,7 @@ void sun4i_tcon_set_status(struct sun4i_tcon *tcon, switch (encoder->encoder_type) { case DRM_MODE_ENCODER_LVDS: is_lvds = true; - /* Fallthrough */ + fallthrough; case DRM_MODE_ENCODER_DSI: case DRM_MODE_ENCODER_NONE: channel = 0; @@ -342,7 +342,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon, /* R and B components are only 5 bits deep */ val |= 
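/*
 * Editor's note on the scheduler hunks above: DRM_SCHED_PRIORITY_COUNT
 * takes over from DRM_SCHED_PRIORITY_MAX as the loop bound -- the usual
 * "last enum member counts the entries" idiom, which reads less
 * ambiguously than a _MAX that is itself not a valid priority. Generic
 * shape of the idiom:
 */
enum demo_prio {
	DEMO_PRIO_MIN,
	DEMO_PRIO_NORMAL,
	DEMO_PRIO_HIGH,
	DEMO_PRIO_COUNT,	/* number of entries above, not a priority */
};

static int demo_highest_ready(const bool ready[DEMO_PRIO_COUNT])
{
	int i;

	/* highest-priority run queues are serviced first */
	for (i = DEMO_PRIO_COUNT - 1; i >= DEMO_PRIO_MIN; i--)
		if (ready[i])
			return i;
	return -1;
}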
SUN4I_TCON0_FRM_CTL_MODE_R; val |= SUN4I_TCON0_FRM_CTL_MODE_B; - /* Fall through */ + fallthrough; case MEDIA_BUS_FMT_RGB666_1X18: case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: /* Fall through: enable dithering */ @@ -1431,14 +1431,18 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon, if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) && encoder->encoder_type == DRM_MODE_ENCODER_TMDS) { ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id); - if (ret) + if (ret) { + put_device(&pdev->dev); return ret; + } } if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) { ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id); - if (ret) + if (ret) { + put_device(&pdev->dev); return ret; + } } return 0; diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 9abde2817e1b355feb68886fcd9e29b91824580b..4f5efcace68ea515c1114224c31f65c86f742f65 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -889,7 +889,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi, regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0), sun6i_dsi_dcs_build_pkt_hdr(dsi, msg)); - bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL); + bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL); if (!bounce) return -ENOMEM; @@ -900,7 +900,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi, memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc)); len += sizeof(crc); - regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len); + regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4)); regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1); kfree(bounce); @@ -1024,7 +1024,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host, ret = sun6i_dsi_dcs_read(dsi, msg); break; } - /* Else, fall through */ + fallthrough; default: ret = -EINVAL; diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c index a360697a4a4ae75024662c518309eb548e6e53be..76393fc976fee3131b1ea7d2efcb6ccd809041bf 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c @@ -211,7 +211,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, return 0; } -static bool sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format) +static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format) { if (!format->is_yuv) return SUN8I_CSC_MODE_OFF; diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 9a0b3240bc5823d0af0edca13f7f0611d25cd204..424ad60b4f388c1e5f492ac26457849fa11bb7b1 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -135,7 +135,7 @@ static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v, default: WARN_ON_ONCE(1); - /* fallthrough */ + fallthrough; case 4: max = 4; break; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 1856962411c779d698f4d88933a819097741c9e9..518220bd092a646330544fc494ac9cfc51bd8e92 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -386,7 +386,7 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XRGB8888: reg |= LCDC_V2_TFT_24BPP_UNPACK; - /* fallthrough */ + fallthrough; case DRM_FORMAT_BGR888: case DRM_FORMAT_RGB888: reg |= LCDC_V2_TFT_24BPP_MODE; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 
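/*
 * Editor's note on the sun6i DSI fix above: regmap_bulk_write() takes its
 * count in *register* units -- 4-byte words for this regmap -- not in
 * bytes, so passing "len" pushed four times too much data out of the
 * bounce buffer. Worked example (my numbers): tx_len = 7 plus a 2-byte
 * CRC gives len = 9, so
 *   words to write  = DIV_ROUND_UP(9, 4) = 3
 *   bytes allocated = ALIGN(7 + 2, 4)    = 12
 * The ALIGN() in the kzalloc() keeps that final partial word inside the
 * allocation, and kzalloc() zero-fills the three pad bytes.
 */
static size_t dsi_tx_words(size_t payload_bytes)
{
	return DIV_ROUND_UP(payload_bytes, 4);	/* 9 bytes -> 3 words */
}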
0e5497d80686309e7cf4e1e74b0fe680b5812e24..98a006fc30a58db3b1e097ca309af1d8556a6765 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -505,7 +505,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, if (unlikely(ret != 0)) return ret; } - /* fall through */ + fallthrough; case TTM_PL_TT: ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write); break; diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c index d733bbc4ac0e5d95f55b1d4cefae688854541eea..17ff24d999d18296b4c1b03bf1a700144fcbdaa5 100644 --- a/drivers/gpu/drm/tve200/tve200_display.c +++ b/drivers/gpu/drm/tve200/tve200_display.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -130,9 +131,25 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe, struct drm_connector *connector = priv->connector; u32 format = fb->format->format; u32 ctrl1 = 0; + int retries; clk_prepare_enable(priv->clk); + /* Reset the TVE200 and wait for it to come back online */ + writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4); + for (retries = 0; retries < 5; retries++) { + usleep_range(30000, 50000); + if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) + continue; + else + break; + } + if (retries == 5 && + readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) { + dev_err(drm->dev, "can't get hardware out of reset\n"); + return; + } + /* Function 1 */ ctrl1 |= TVE200_CTRL_CSMODE; /* Interlace mode for CCIR656: parameterize? */ @@ -230,8 +247,9 @@ static void tve200_display_disable(struct drm_simple_display_pipe *pipe) drm_crtc_vblank_off(crtc); - /* Disable and Power Down */ + /* Disable put into reset and Power Down */ writel(0, priv->regs + TVE200_CTRL); + writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4); clk_disable_unprepare(priv->clk); } @@ -279,6 +297,8 @@ static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe) struct drm_device *drm = crtc->dev; struct tve200_drm_dev_private *priv = drm->dev_private; + /* Clear any IRQs and enable */ + writel(0xFF, priv->regs + TVE200_INT_CLR); writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN); return 0; } diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index 551fa31629afb8a8eec8aa76c1d3e89bff252cec..5771bb53ce6a51ac8a120bb2997c3433fade337f 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c @@ -179,21 +179,21 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) switch (vsg->state) { case dr_via_device_mapped: via_unmap_blit_from_device(pdev, vsg); - /* fall through */ + fallthrough; case dr_via_desc_pages_alloc: for (i = 0; i < vsg->num_desc_pages; ++i) { if (vsg->desc_pages[i] != NULL) free_page((unsigned long)vsg->desc_pages[i]); } kfree(vsg->desc_pages); - /* fall through */ + fallthrough; case dr_via_pages_locked: unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages, (vsg->direction == DMA_FROM_DEVICE)); - /* fall through */ + fallthrough; case dr_via_pages_alloc: vfree(vsg->pages); - /* fall through */ + fallthrough; default: vsg->state = dr_via_sg_init; } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 7a2430e34e003b1edc82a10ef46945d70aedcf3b..c8da7adc6b307b5c690335d5edec7bc27eb9a543 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -179,6 +179,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, 
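/*
 * Editor's aside: the open-coded tve200 reset poll above (5 tries, 30-50
 * ms apart) could also be written with readl_poll_timeout() from
 * <linux/iopoll.h>. A hedged alternative sketch, not the driver's actual
 * code; the timeout budget is my choice:
 */
static int tve200_wait_reset(struct tve200_drm_dev_private *priv)
{
	u32 val;

	/* poll roughly every 30 ms, give up after 250 ms */
	return readl_poll_timeout(priv->regs + TVE200_CTRL_4, val,
				  !(val & TVE200_CTRL_4_RESET),
				  30000, 250000);
}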
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, vfpriv->ctx_id, buflist, out_fence); + dma_fence_put(&out_fence->f); virtio_gpu_notify(vgdev); return 0; diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 729f98ad7c02039ced5fa52666aee81919fa45ff..842f8b61aa89708d67939ed144444b16e6b1cbac 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -79,6 +79,7 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) } sg_free_table(shmem->pages); + kfree(shmem->pages); shmem->pages = NULL; drm_gem_shmem_unpin(&bo->base.base); } diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 013c9e0e412cdca93d973772f5166686ff693cba..cc93a8c9547bcc76814294713ae1c8910b30ca75 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -649,9 +649,7 @@ static void displback_changed(struct xenbus_device *xb_dev, switch (backend_state) { case XenbusStateReconfiguring: - /* fall through */ case XenbusStateReconfigured: - /* fall through */ case XenbusStateInitialised: break; @@ -701,7 +699,6 @@ static void displback_changed(struct xenbus_device *xb_dev, break; case XenbusStateUnknown: - /* fall through */ case XenbusStateClosed: if (xb_dev->state == XenbusStateClosed) break; diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index aed7510e271083fe869f73b7943fd9da67b90c2c..a8aefaa38bd360c3969212b93be11a8b072666ff 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -18,6 +18,7 @@ #include #include +#include #include "xen_drm_front.h" #include "xen_drm_front_gem.h" @@ -99,8 +100,8 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) * allocate ballooned pages which will be used to map * grant references provided by the backend */ - ret = alloc_xenballooned_pages(xen_obj->num_pages, - xen_obj->pages); + ret = xen_alloc_unpopulated_pages(xen_obj->num_pages, + xen_obj->pages); if (ret < 0) { DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n", xen_obj->num_pages, ret); @@ -152,8 +153,8 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj) } else { if (xen_obj->pages) { if (xen_obj->be_alloc) { - free_xenballooned_pages(xen_obj->num_pages, - xen_obj->pages); + xen_free_unpopulated_pages(xen_obj->num_pages, + xen_obj->pages); gem_free_pages_array(xen_obj); } else { drm_gem_put_pages(&xen_obj->base, diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig index aa6cd889bd119f726531a23cc2392a352f901334..b52c6cdfc0b872d68dacd591ddfa294ed148d979 100644 --- a/drivers/gpu/drm/xlnx/Kconfig +++ b/drivers/gpu/drm/xlnx/Kconfig @@ -2,6 +2,7 @@ config DRM_ZYNQMP_DPSUB tristate "ZynqMP DisplayPort Controller Driver" depends on ARCH_ZYNQMP || COMPILE_TEST depends on COMMON_CLK && DRM && OF + depends on DMADEVICES select DMA_ENGINE select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c index dbcc167219318f5145b5da058fd9994399af8c72..34b4075a6a8e531701a0fd43e708b3caec419106 100644 --- a/drivers/gpu/ipu-v3/ipu-dc.c +++ b/drivers/gpu/ipu-v3/ipu-dc.c @@ -141,7 +141,7 @@ static int ipu_bus_format_to_map(u32 fmt) switch (fmt) { default: WARN_ON(1); - /* fall-through */ + fallthrough; case MEDIA_BUS_FMT_RGB888_1X24: return IPU_DC_MAP_RGB24; case MEDIA_BUS_FMT_RGB565_1X16: diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 
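/*
 * Editor's note: the two virtio fixes above follow one ownership rule --
 * the creator of a refcounted or heap object must drop its own
 * reference/allocation once the consumer holds its own. dma_fence_put()
 * balances the submitter's reference after the command is queued, and
 * sg_free_table() only releases the scatterlist chunks *inside* a struct
 * sg_table, so a heap-allocated table still needs kfree(). Distilled:
 */
static int sg_table_demo(unsigned int nents)
{
	struct sg_table *sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);

	if (!sgt)
		return -ENOMEM;
	if (sg_alloc_table(sgt, nents, GFP_KERNEL)) {
		kfree(sgt);
		return -ENOMEM;
	}
	/* ... map and use the table ... */
	sg_free_table(sgt);	/* frees the chained sg arrays ... */
	kfree(sgt);		/* ... but not the sg_table struct itself */
	return 0;
}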
359616e3efbbb244e633b4a37ae6361c30abf19f..d2ecc9c4525548c5ae4f1f1a54de70d61c52a91d 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1597,6 +1597,17 @@ static void hid_output_field(const struct hid_device *hid, } } +/* + * Compute the size of a report. + */ +static size_t hid_compute_report_size(struct hid_report *report) +{ + if (report->size) + return ((report->size - 1) >> 3) + 1; + + return 0; +} + /* * Create a report. 'data' has to be allocated using * hid_alloc_report_buf() so that it has proper size. @@ -1609,7 +1620,7 @@ void hid_output_report(struct hid_report *report, __u8 *data) if (report->id > 0) *data++ = report->id; - memset(data, 0, ((report->size - 1) >> 3) + 1); + memset(data, 0, hid_compute_report_size(report)); for (n = 0; n < report->maxfield; n++) hid_output_field(report->device, report->field[n], data); } @@ -1739,7 +1750,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, csize--; } - rsize = ((report->size - 1) >> 3) + 1; + rsize = hid_compute_report_size(report); if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) rsize = HID_MAX_BUFFER_SIZE - 1; diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c index 4ff3bc1d25e23fe91b4d8460e4ab13d934ea14c2..28d671c5e0cac27e06ae9a73d464f1fa0bcd188a 100644 --- a/drivers/hid/hid-cougar.c +++ b/drivers/hid/hid-cougar.c @@ -321,7 +321,7 @@ static const struct kernel_param_ops cougar_g6_is_space_ops = { }; module_param_cb(g6_is_space, &cougar_g6_is_space_ops, &g6_is_space, 0644); -static struct hid_device_id cougar_id_table[] = { +static const struct hid_device_id cougar_id_table[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR, USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR, diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c index 45c4f888b7c4e61b9adf8f3c2101f337ae14870c..dae193749d443490390255df260fda27edc3e531 100644 --- a/drivers/hid/hid-elan.c +++ b/drivers/hid/hid-elan.c @@ -188,6 +188,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER); if (ret) { hid_err(hdev, "Failed to init elan MT slots: %d\n", ret); + input_free_device(input); return ret; } @@ -198,6 +199,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) if (ret) { hid_err(hdev, "Failed to register elan input device: %d\n", ret); + input_mt_destroy_slots(input); input_free_device(input); return ret; } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 6221888aae99fd402e788b4c7c857d1581f7c6c4..74fc1df6e3c2726135791dd4a19063bc52fb23cf 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -728,6 +728,9 @@ #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d +#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019 +#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e +#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093 0x6093 #define USB_VENDOR_ID_LG 0x1fd2 #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 @@ -847,6 +850,7 @@ #define USB_DEVICE_ID_MS_POWER_COVER 0x07da #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd #define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb +#define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0 #define USB_VENDOR_ID_MOJO 0x8282 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 @@ -1012,6 +1016,8 @@ #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 #define 
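/*
 * Editor's note: ((size - 1) >> 3) + 1 is DIV_ROUND_UP(size, 8), i.e.
 * report bits converted to bytes -- except that a descriptor with
 * size == 0 makes the unsigned subtraction wrap, turning the memset()
 * length into roughly 512 MiB. That is the overflow the new
 * hid_compute_report_size() guards against. Equivalent safe form:
 */
static size_t report_bits_to_bytes(size_t bits)
{
	return bits ? DIV_ROUND_UP(bits, 8) : 0;
}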
USB_DEVICE_ID_SAITEK_X52 0x075c +#define USB_DEVICE_ID_SAITEK_X52_2 0x0255 +#define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762 #define USB_VENDOR_ID_SAMSUNG 0x0419 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index b8eabf206e7439908830e4c6d7218127ec5a73e8..88e19996427e63aad2ac1816c7f1a7039dc69921 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1132,6 +1132,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } mapped: + /* Mapping failed, bail out */ + if (!bit) + return; + if (device->driver->input_mapped && device->driver->input_mapped(device, hidinput, field, usage, &bit, &max) < 0) { diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c index ef0cbcd7540d564e91e8f70c03aa947a410ce1c9..fcaf8466e627d51da31c781bbbc2f820c80b405d 100644 --- a/drivers/hid/hid-lg-g15.c +++ b/drivers/hid/hid-lg-g15.c @@ -680,7 +680,7 @@ static int lg_g15_register_led(struct lg_g15_data *g15, int i) * but it does have a separate power-on (reset) value. */ g15->leds[i].cdev.name = "g15::power_on_backlight_val"; - /* fall through */ + fallthrough; case LG_G15_KBD_BRIGHTNESS: g15->leds[i].cdev.brightness_set_blocking = lg_g510_kbd_led_set; diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index a78c13cc9f470f555a9a63553ef187cbc8e2d041..38ee25a813b9e2bec38c44ed2d6486ff5a5fb54c 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -844,7 +844,7 @@ static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev, workitem.type = WORKITEM_TYPE_EMPTY; break; } - /* fall-through */ + fallthrough; case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: workitem.quad_id_msb = dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB]; diff --git a/drivers/hid/hid-macally.c b/drivers/hid/hid-macally.c index 9a4fc7dffb14d551689b0b5e7ea5eefcfeaa4625..aea46e5220082acae14b78097bb27d4a13b84f65 100644 --- a/drivers/hid/hid-macally.c +++ b/drivers/hid/hid-macally.c @@ -29,7 +29,7 @@ static __u8 *macally_report_fixup(struct hid_device *hdev, __u8 *rdesc, return rdesc; } -static struct hid_device_id macally_id_table[] = { +static const struct hid_device_id macally_id_table[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR, USB_DEVICE_ID_MACALLY_IKEY_KEYBOARD) }, { } diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index 2d8b589201a4e80c7d2dc7c4a6df35344840bd9a..071fd093a5f4e139893ccf53befe23f14b8bf974 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -163,16 +163,13 @@ static int ms_surface_dial_quirk(struct hid_input *hi, struct hid_field *field, { switch (usage->hid & HID_USAGE_PAGE) { case 0xff070000: - /* fall-through */ case HID_UP_DIGITIZER: /* ignore those axis */ return -1; case HID_UP_GENDESK: switch (usage->hid) { case HID_GD_X: - /* fall-through */ case HID_GD_Y: - /* fall-through */ case HID_GD_RFKILL_BTN: /* ignore those axis */ return -1; @@ -451,6 +448,8 @@ static const struct hid_device_id ms_devices[] = { .driver_data = MS_SURFACE_DIAL }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER), .driver_data = MS_QUIRK_FF }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS), + .driver_data = MS_QUIRK_FF }, { } }; MODULE_DEVICE_TABLE(hid, ms_devices); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 3f94b4954225b38ea6fa4bea9194bebb1ab8b383..e3152155c4b85de7bac90441f821712a6cc9fbe2 100644 --- 
a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -856,6 +856,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, code = BTN_0 + ((usage->hid - 1) & HID_USAGE); hid_map_usage(hi, usage, bit, max, EV_KEY, code); + if (!*bit) + return -1; input_set_capability(hi->input, EV_KEY, code); return 1; diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index c242150d35a3afa7f5c4ffcd85003e6073dbf550..7a2be0205dfd12a81a82e963893ca664512bb526 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -105,6 +105,9 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET }, @@ -147,6 +150,8 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET }, diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 8cffa84c9650318cdb3fa8beac4011ac592f16b4..7f41213d5ae3430be88e7ac499c39f68daa9ae52 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c @@ -428,7 +428,6 @@ static void rmi_report(struct hid_device *hid, struct hid_report *report) switch (report->id) { case RMI_READ_DATA_REPORT_ID: - /* fall-through */ case RMI_ATTN_REPORT_ID: return; } diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index 1a6e600197d0ba99bb5284385953609686bd867c..2ff4c8e366ff2fc10f0cd909d88147078a5de778 100644 --- a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c @@ -780,7 +780,7 @@ static void kone_keep_values_up_to_date(struct kone_device *kone, case kone_mouse_event_switch_profile: kone->actual_dpi = kone->profiles[event->value - 1]. 
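/*
 * Editor's note: hid_map_usage() can leave the mapped bitmap pointer NULL
 * when a usage resolves to no input capability; both guards above
 * (hid-input's "if (!bit) return;" and hid-multitouch's "if (!*bit)
 * return -1;") bail out before that pointer is dereferenced. The shape of
 * the multitouch fix, as it sits inside mt_touch_input_mapping():
 */
	hid_map_usage(hi, usage, bit, max, EV_KEY, code);
	if (!*bit)	/* mapping failed -- don't register the capability */
		return -1;
	input_set_capability(hi->input, EV_KEY, code);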
startup_dpi; - /* fall through */ + fallthrough; case kone_mouse_event_osd_profile: kone->actual_profile = event->value; break; diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index 78a364ae2f68580913c2e0e589cc9156ca4b8140..7d20d1fcf8d208f32bda368e7b33b516c76606a0 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c @@ -974,7 +974,7 @@ int uclogic_params_init(struct uclogic_params *params, } break; } - /* FALL THROUGH */ + fallthrough; case VID_PID(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET): case VID_PID(USB_VENDOR_ID_HUION, diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c index 679e142fc850c992f3f417513164a38405788676..e484c3618decafc1925b2343bfd6c2f462c139ba 100644 --- a/drivers/hid/hid-wiimote-core.c +++ b/drivers/hid/hid-wiimote-core.c @@ -1672,7 +1672,6 @@ static ssize_t wiimote_ext_show(struct device *dev, case WIIMOTE_EXT_GUITAR: return sprintf(buf, "guitar\n"); case WIIMOTE_EXT_UNKNOWN: - /* fallthrough */ default: return sprintf(buf, "unknown\n"); } @@ -1722,7 +1721,6 @@ static ssize_t wiimote_dev_show(struct device *dev, case WIIMOTE_DEV_PENDING: return sprintf(buf, "pending\n"); case WIIMOTE_DEV_UNKNOWN: - /* fallthrough */ default: return sprintf(buf, "unknown\n"); } diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 294c84e136d72686842516c9422ece66739b724e..dbd04492825d473a354d114085d3870b71de805e 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -420,6 +420,19 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) dev_err(&client->dev, "failed to change power setting.\n"); set_pwr_exit: + + /* + * The HID over I2C specification states that if a DEVICE needs time + * after the PWR_ON request, it should utilise CLOCK stretching. + * However, it has been observed that the Windows driver provides a + * 1ms sleep between the PWR_ON and RESET requests. + * According to Goodix, Windows even waits 60 ms after (other?) + * PWR_ON requests. Testing has confirmed that several devices + * will not work properly without a delay after a PWR_ON request. + */ + if (!ret && power_state == I2C_HID_PWR_ON) + msleep(60); + return ret; } @@ -441,15 +454,6 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) goto out_unlock; - /* - * The HID over I2C specification states that if a DEVICE needs time - * after the PWR_ON request, it should utilise CLOCK stretching. - * However, it has been observered that the Windows driver provides a - * 1ms sleep between the PWR_ON and RESET requests and that some devices - * rely on this. - */ - usleep_range(1000, 5000); - i2c_hid_dbg(ihid, "resetting...\n"); ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 492dd641a25df2225c2d61915e4e7ab6c5a95e12..17a29ee0ac6c23346e2502df0130dc9a50d89db1 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -26,7 +26,6 @@ #include #include #include -#include #include @@ -96,18 +95,6 @@ static int hid_start_in(struct hid_device *hid) set_bit(HID_NO_BANDWIDTH, &usbhid->iofl); } else { clear_bit(HID_NO_BANDWIDTH, &usbhid->iofl); - - if (test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) { - /* - * In case events are generated while nobody was - * listening, some are released when the device - * is re-opened. Wait 50 msec for the queue to - * empty before allowing events to go through - * hid.
- */ - usbhid->input_start_time = - ktime_add_ms(ktime_get_coarse(), 50); - } } } spin_unlock_irqrestore(&usbhid->lock, flags); @@ -293,23 +280,20 @@ static void hid_irq_in(struct urb *urb) if (!test_bit(HID_OPENED, &usbhid->iofl)) break; usbhid_mark_busy(usbhid); - if (test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) { - if (ktime_before(ktime_get_coarse(), - usbhid->input_start_time)) - break; - clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); + if (!test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) { + hid_input_report(urb->context, HID_INPUT_REPORT, + urb->transfer_buffer, + urb->actual_length, 1); + /* + * autosuspend refused while keys are pressed + * because most keyboards don't wake up when + * a key is released + */ + if (hid_check_keys_pressed(hid)) + set_bit(HID_KEYS_PRESSED, &usbhid->iofl); + else + clear_bit(HID_KEYS_PRESSED, &usbhid->iofl); } - hid_input_report(urb->context, HID_INPUT_REPORT, - urb->transfer_buffer, urb->actual_length, 1); - /* - * autosuspend refused while keys are pressed - * because most keyboards don't wake up when - * a key is released - */ - if (hid_check_keys_pressed(hid)) - set_bit(HID_KEYS_PRESSED, &usbhid->iofl); - else - clear_bit(HID_KEYS_PRESSED, &usbhid->iofl); break; case -EPIPE: /* stall */ usbhid_mark_busy(usbhid); @@ -736,6 +720,17 @@ static int usbhid_open(struct hid_device *hid) usb_autopm_put_interface(usbhid->intf); + /* + * In case events are generated while nobody was listening, + * some are released when the device is re-opened. + * Wait 50 msec for the queue to empty before allowing events + * to go through hid. + */ + if (res == 0) + msleep(50); + + clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); + Done: mutex_unlock(&usbhid->mutex); return res; diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 4140dea693e90988d80bac6129458499fb2d03af..45e0b1c75cb1129e5fdf7896c5c6264be169e3cd 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -519,12 +519,16 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, switch (cmd) { case HIDIOCGUSAGE: + if (uref->usage_index >= field->report_count) + goto inval; uref->value = field->value[uref->usage_index]; if (copy_to_user(user_arg, uref, sizeof(*uref))) goto fault; goto goodreturn; case HIDIOCSUSAGE: + if (uref->usage_index >= field->report_count) + goto inval; field->value[uref->usage_index] = uref->value; goto goodreturn; @@ -781,7 +785,6 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; case HIDIOCGUCODE: - /* fall through */ case HIDIOCGUSAGE: case HIDIOCSUSAGE: case HIDIOCGUSAGES: diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h index c6ad684d099a10d7f7328ca1e0dbe93c7c0ef3f1..75fe85d3d27a0a25d6f5aad7123bcf527ae13261 100644 --- a/drivers/hid/usbhid/usbhid.h +++ b/drivers/hid/usbhid/usbhid.h @@ -13,7 +13,6 @@ #include #include -#include #include #include #include @@ -84,7 +83,6 @@ struct usbhid_device { struct mutex mutex; /* start/stop/open/close */ spinlock_t lock; /* fifo spinlock */ unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ - ktime_t input_start_time; /* When to start handling input */ struct timer_list io_retry; /* Retry timer */ unsigned long stop_retry; /* Time to give up, in jiffies */ unsigned int retry_delay; /* Delay length in ms */ diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 1c96809b51c909431255eb912d04156d7f5fd8aa..83dfec327c422b974c20de2b38be2ba97d989dee 100644 --- a/drivers/hid/wacom_wac.c +++ 
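/*
 * Editor's note: HIDIOCGUSAGE/HIDIOCSUSAGE index field->value[] with a
 * user-controlled usage_index, so both ioctl paths above must
 * bounds-check it before touching the array -- the standard
 * validate-before-use pattern for values copied in from userspace:
 */
static int hiddev_check_index(const struct hiddev_usage_ref *uref,
			      const struct hid_field *field)
{
	if (uref->usage_index >= field->report_count)
		return -EINVAL;	/* reject out-of-range index from userspace */
	return 0;
}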
b/drivers/hid/wacom_wac.c @@ -341,7 +341,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) case 2: /* Mouse with wheel */ input_report_key(input, BTN_MIDDLE, data[1] & 0x04); - /* fall through */ + fallthrough; case 3: /* Mouse without wheel */ wacom->tool[0] = BTN_TOOL_MOUSE; @@ -1201,7 +1201,7 @@ static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len) case 0x04: wacom_intuos_bt_process_data(wacom, data + i); i += 10; - /* fall through */ + fallthrough; case 0x03: wacom_intuos_bt_process_data(wacom, data + i); i += 10; @@ -2148,7 +2148,7 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field for (i = 0; i < wacom->led.count; i++) wacom_update_led(wacom, features->numbered_buttons, value, i); - /* fall through*/ + fallthrough; default: do_report = true; break; @@ -3602,14 +3602,14 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, switch (features->type) { case GRAPHIRE_BT: __clear_bit(ABS_MISC, input_dev->absbit); - /* fall through */ + fallthrough; case WACOM_MO: case WACOM_G4: input_set_abs_params(input_dev, ABS_DISTANCE, 0, features->distance_max, features->distance_fuzz, 0); - /* fall through */ + fallthrough; case GRAPHIRE: input_set_capability(input_dev, EV_REL, REL_WHEEL); @@ -3649,7 +3649,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, case INTUOS4S: input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); input_abs_set_res(input_dev, ABS_Z, 287); - /* fall through */ + fallthrough; case INTUOS: wacom_setup_intuos(wacom_wac); @@ -3682,7 +3682,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, case TABLETPC: case TABLETPCE: __clear_bit(ABS_MISC, input_dev->absbit); - /* fall through */ + fallthrough; case DTUS: case DTUSX: @@ -3696,7 +3696,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, case PTU: __set_bit(BTN_STYLUS2, input_dev->keybit); - /* fall through */ + fallthrough; case PENPARTNER: __set_bit(BTN_TOOL_PEN, input_dev->keybit); @@ -3799,7 +3799,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40); input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40); - /* fall through */ + fallthrough; case INTUOS5: case INTUOS5L: @@ -3817,7 +3817,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0); input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0); input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0); - /* fall through */ + fallthrough; case WACOM_27QHDT: if (wacom_wac->shared->touch->product == 0x32C || @@ -3826,14 +3826,14 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, __set_bit(SW_MUTE_DEVICE, input_dev->swbit); wacom_wac->shared->has_mute_touch_switch = true; } - /* fall through */ + fallthrough; case MTSCREEN: case MTTPC: case MTTPC_B: case TABLETPC2FG: input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT); - /*fall through */ + fallthrough; case TABLETPC: case TABLETPCE: @@ -3843,7 +3843,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, case INTUOSHT2: input_dev->evbit[0] |= BIT_MASK(EV_SW); __set_bit(SW_MUTE_DEVICE, input_dev->swbit); - /* fall through */ + fallthrough; case BAMBOO_PT: case BAMBOO_TOUCH: @@ -4099,7 +4099,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, __set_bit(KEY_BUTTONCONFIG, input_dev->keybit); __set_bit(KEY_INFO, 
input_dev->keybit); - /* fall through */ + fallthrough; case WACOM_21UX2: case WACOM_BEE: @@ -4115,7 +4115,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, case INTUOS3: case INTUOS3L: input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); - /* fall through */ + fallthrough; case INTUOS3S: input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); @@ -4139,7 +4139,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, * ID_INPUT_TABLET to be set. */ __set_bit(BTN_STYLUS, input_dev->keybit); - /* fall through */ + fallthrough; case INTUOS4: case INTUOS4L: diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index 365b5d5967accf1d69d79bb8dc3c685d827b7725..96d0eccca3aa74a5871b386f5dc369735e58745b 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c @@ -291,7 +291,7 @@ static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state) /* CMT speech workaround */ if (atomic_read(&ssi->tx_usecnt)) break; - /* Else, fall through */ + fallthrough; case RECEIVING: mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT)); @@ -466,7 +466,7 @@ static void ssip_keep_alive(struct timer_list *t) case SEND_READY: if (atomic_read(&ssi->tx_usecnt) == 0) break; - /* Fall through */ + fallthrough; /* * Workaround for cmt-speech in that case * we relay on audio timers. @@ -668,7 +668,7 @@ static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd) case ACTIVE: dev_err(&cl->device, "Boot info req on active state\n"); ssip_error(cl); - /* Fall through */ + fallthrough; case INIT: case HANDSHAKE: spin_lock_bh(&ssi->lock); diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c index 4bc4a201f0f6c2186c667a583e98409cfe08ddb4..fa69b94debd9b584d4b5041b23cb31439dae2f10 100644 --- a/drivers/hsi/controllers/omap_ssi_core.c +++ b/drivers/hsi/controllers/omap_ssi_core.c @@ -296,7 +296,7 @@ static int ssi_clk_event(struct notifier_block *nb, unsigned long event, break; case ABORT_RATE_CHANGE: dev_dbg(&ssi->device, "abort rate change\n"); - /* Fall through */ + fallthrough; case POST_RATE_CHANGE: dev_dbg(&ssi->device, "post rate change (%lu -> %lu)\n", clk_data->old_rate, clk_data->new_rate); diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index e74b144b8f3d40be819f59b0d13bc1181d8bdcb5..754d35a25a1cc184bfe9aec0543e79be9633ae47 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -354,7 +354,7 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op) out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled; - /* fallthrough */ + fallthrough; case KVP_OP_GET_IP_INFO: utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id, diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 92ee0fe4c919ec7171c76e15d96e532334cf7396..a4e8d96513c22ed5b06c6944108e71f5857b9e4d 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -282,26 +282,52 @@ static struct { spinlock_t lock; } host_ts; -static struct timespec64 hv_get_adj_host_time(void) +static inline u64 reftime_to_ns(u64 reftime) { - struct timespec64 ts; - u64 newtime, reftime; + return (reftime - WLTIMEDELTA) * 100; +} + +/* + * Hard coded threshold for host timesync delay: 600 seconds + */ +static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC; + +static int hv_get_adj_host_time(struct timespec64 *ts) +{ + u64 newtime, reftime, timediff_adj; unsigned long flags; + int ret = 0; spin_lock_irqsave(&host_ts.lock, flags); reftime = 
hv_read_reference_counter(); - newtime = host_ts.host_time + (reftime - host_ts.ref_time); - ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100); + + /* + * We need to let the caller know that last update from host + * is older than the max allowable threshold. clock_gettime() + * and PTP ioctl do not have a documented error that we could + * return for this specific case. Use ESTALE to report this. + */ + timediff_adj = reftime - host_ts.ref_time; + if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) { + pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n", + (timediff_adj * 100)); + ret = -ESTALE; + } + + newtime = host_ts.host_time + timediff_adj; + *ts = ns_to_timespec64(reftime_to_ns(newtime)); spin_unlock_irqrestore(&host_ts.lock, flags); - return ts; + return ret; } static void hv_set_host_time(struct work_struct *work) { - struct timespec64 ts = hv_get_adj_host_time(); - do_settimeofday64(&ts); + struct timespec64 ts; + + if (!hv_get_adj_host_time(&ts)) + do_settimeofday64(&ts); } /* @@ -361,10 +387,23 @@ static void timesync_onchannelcallback(void *context) struct ictimesync_ref_data *refdata; u8 *time_txf_buf = util_timesynch.recv_buffer; - vmbus_recvpacket(channel, time_txf_buf, - HV_HYP_PAGE_SIZE, &recvlen, &requestid); + /* + * Drain the ring buffer and use the last packet to update + * host_ts + */ + while (1) { + int ret = vmbus_recvpacket(channel, time_txf_buf, + HV_HYP_PAGE_SIZE, &recvlen, + &requestid); + if (ret) { + pr_warn_once("TimeSync IC pkt recv failed (Err: %d)\n", + ret); + break; + } + + if (!recvlen) + break; - if (recvlen > 0) { icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[ sizeof(struct vmbuspipe_hdr)]; @@ -622,9 +661,7 @@ static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts) { - *ts = hv_get_adj_host_time(); - - return 0; + return hv_get_adj_host_time(ts); } static struct ptp_clock_info ptp_hyperv_info = { diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c index 319a0519ebdb0f44a675cd241a85a114b2b8dc7f..208813158bb405539044480fe13c921d0247be1d 100644 --- a/drivers/hwmon/adt7462.c +++ b/drivers/hwmon/adt7462.c @@ -435,7 +435,7 @@ static const char *voltage_label(struct adt7462_data *data, int which) case 3: return "+1.5V"; } - /* fall through */ + fallthrough; case 2: if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT)) return "+12V3"; @@ -493,7 +493,7 @@ static const char *voltage_label(struct adt7462_data *data, int which) case 3: return "+1.5"; } - /* fall through */ + fallthrough; case 11: if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == ADT7462_PIN28_VOLT && @@ -531,7 +531,7 @@ static int voltage_multiplier(struct adt7462_data *data, int which) case 3: return 7800; } - /* fall through */ + fallthrough; case 2: if (!(data->pin_cfg[1] & ADT7462_PIN22_INPUT)) return 62500; @@ -589,7 +589,7 @@ static int voltage_multiplier(struct adt7462_data *data, int which) case 3: return 7800; } - /* fall through */ + fallthrough; case 11: case 12: if (data->pin_cfg[3] >> ADT7462_PIN28_SHIFT == diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 31661840931577e54cc75e654ec6b4f86af630c0..a18887990f4a2120c45c0d2ef744df7353b54fc8 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -753,15 +753,18 @@ static ssize_t applesmc_light_show(struct device *dev, } ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length); + if (ret) + goto out; /* newer macbooks report a single 10-bit bigendian value */ if (data_length == 10) { 
left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2; goto out; } left = buffer[2]; + + ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); if (ret) goto out; - ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); right = buffer[2]; out: @@ -810,12 +813,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev, to_index(attr)); ret = applesmc_read_key(newkey, buffer, 2); - speed = ((buffer[0] << 8 | buffer[1]) >> 2); - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); + + speed = ((buffer[0] << 8 | buffer[1]) >> 2); + return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); } static ssize_t applesmc_store_fan_speed(struct device *dev, @@ -851,12 +853,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev, u8 buffer[2]; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); - manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); + + manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); } static ssize_t applesmc_store_fan_manual(struct device *dev, @@ -872,10 +873,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev, return -EINVAL; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); - val = (buffer[0] << 8 | buffer[1]); if (ret) goto out; + val = (buffer[0] << 8 | buffer[1]); + if (input) val = val | (0x01 << to_index(attr)); else @@ -951,13 +953,12 @@ static ssize_t applesmc_key_count_show(struct device *dev, u32 count; ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4); - count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + - ((u32)buffer[2]<<8) + buffer[3]; - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); + + count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + + ((u32)buffer[2]<<8) + buffer[3]; + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); } static ssize_t applesmc_key_at_index_read_show(struct device *dev, diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c index cf0962f7a02033a9f3112f9e0df241399a538fa9..e9c0bbc2caa95636411a069c2debbca451656bad 100644 --- a/drivers/hwmon/emc1403.c +++ b/drivers/hwmon/emc1403.c @@ -406,10 +406,10 @@ static int emc1403_probe(struct i2c_client *client, switch (id->driver_data) { case emc1404: data->groups[2] = &emc1404_group; - /* fall through */ + fallthrough; case emc1403: data->groups[1] = &emc1403_group; - /* fall through */ + fallthrough; case emc1402: data->groups[0] = &emc1402_group; } diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index d09deb409de7a0b4a918c02c0a5d7cbede17fa14..4dec793fd07d576caa8c9ae5b4ff1b38ad9a6f1b 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -1285,7 +1285,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) data->pwm_auto_point_pwm[nr][0] = f71882fg_read8(data, F71882FG_REG_POINT_PWM(nr, 0)); - /* Fall through */ + fallthrough; case f71862fg: data->pwm_auto_point_pwm[nr][1] = f71882fg_read8(data, @@ -2442,7 +2442,7 @@ static int f71882fg_probe(struct platform_device *pdev) case f71869a: /* These always have signed auto point temps */ data->auto_point_temp_signed = 1; - /* Fall through - to select correct fan/pwm reg bank! */ + fallthrough; /* to select correct fan/pwm reg bank! 
*/ case f71889fg: case f71889ed: case f71889a: diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c index 3dfe2ca2f8c89cad51f015516f2224a9e3d7092b..c6d4567f395205da803451431ca380bce5240c80 100644 --- a/drivers/hwmon/gsc-hwmon.c +++ b/drivers/hwmon/gsc-hwmon.c @@ -172,6 +172,7 @@ gsc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, case mode_temperature: if (tmp > 0x8000) tmp -= 0xffff; + tmp *= 100; /* convert to millidegrees celsius */ break; case mode_voltage_raw: tmp = clamp_val(tmp, 0, BIT(GSC_HWMON_RESOLUTION)); diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c index eb72e390844ecddf10db5b9b5b765e2560d96a81..6d1175a5183264e8f4ef39e9e86ce6510fd65c35 100644 --- a/drivers/hwmon/hwmon-vid.c +++ b/drivers/hwmon/hwmon-vid.c @@ -96,7 +96,7 @@ int vid_from_reg(int val, u8 vrm) val &= 0x1f; if (val == 0x1f) return 0; - /* fall through */ + fallthrough; case 25: /* AMD NPT 0Fh */ val &= 0x3f; return (val < 32) ? 1550 - 25 * val @@ -122,7 +122,7 @@ int vid_from_reg(int val, u8 vrm) case 84: /* VRM 8.4 */ val &= 0x0f; - /* fall through */ + fallthrough; case 82: /* VRM 8.2 */ val &= 0x1f; return val == 0x1f ? 0 : diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c index 7fc5b065ad8b238aabacae137025b7506e0f89d2..81e155692aba57631fc2f26a7584908156681a18 100644 --- a/drivers/hwmon/ina3221.c +++ b/drivers/hwmon/ina3221.c @@ -352,7 +352,7 @@ static int ina3221_read_curr(struct device *dev, u32 attr, if (ret) return ret; - /* fall through */ + fallthrough; case hwmon_curr_crit: case hwmon_curr_max: if (!resistance_uo) diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 750b08713dee50434afc2ac0da80a1bd8ec9e81d..5bd15622a85f942ea7eefbf1b80124640bb368c7 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -2669,7 +2669,7 @@ static void pwm_update_registers(struct nct6775_data *data, int nr) case thermal_cruise: nct6775_write_value(data, data->REG_TARGET[nr], data->target_temp[nr]); - /* fall through */ + fallthrough; default: reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]); reg = (reg & ~data->tolerance_mask) | diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index b0425694f702252e250e28e7b57aded40a5b6017..242ff8bee78dd718405210800e9086c67faac77f 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -231,7 +231,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel, if (ret < 0) return ret; cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f); - if (cnt == 0x1fff) + if (cnt == 0 || cnt == 0x1fff) rpm = 0; else rpm = 1350000 / cnt; @@ -243,7 +243,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel, if (ret < 0) return ret; cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f); - if (cnt == 0x1fff) + if (cnt == 0 || cnt == 0x1fff) rpm = 0; else rpm = 1350000 / cnt; diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index 30e18eb60da79eb61ba9194a056de8a2198311ad..a71777990d49606a57b1f7ced79bf0e154bb1987 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -752,7 +752,7 @@ static int occ_setup_sensor_attrs(struct occ *occ) switch (sensors->freq.version) { case 2: show_freq = occ_show_freq_2; - /* fall through */ + fallthrough; case 1: num_attrs += (sensors->freq.num_sensors * 2); break; @@ -763,7 +763,7 @@ static int occ_setup_sensor_attrs(struct occ *occ) switch (sensors->power.version) { case 2: show_power = occ_show_power_2; - /* fall through */ + fallthrough; case 1: num_attrs += (sensors->power.num_sensors * 
4); break; @@ -781,7 +781,7 @@ static int occ_setup_sensor_attrs(struct occ *occ) break; case 3: show_caps = occ_show_caps_3; - /* fall through */ + fallthrough; case 2: num_attrs += (sensors->caps.num_sensors * 8); break; diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c index 0c622711ef7e0e926a20980961e98322d7663d9e..58aa95a3c010cd81139ee037a68d88b63c1ae712 100644 --- a/drivers/hwmon/pmbus/isl68137.c +++ b/drivers/hwmon/pmbus/isl68137.c @@ -67,6 +67,7 @@ enum variants { raa_dmpvr1_2rail, raa_dmpvr2_1rail, raa_dmpvr2_2rail, + raa_dmpvr2_2rail_nontc, raa_dmpvr2_3rail, raa_dmpvr2_hv, }; @@ -241,6 +242,10 @@ static int isl68137_probe(struct i2c_client *client, info->pages = 1; info->read_word_data = raa_dmpvr2_read_word_data; break; + case raa_dmpvr2_2rail_nontc: + info->func[0] &= ~PMBUS_HAVE_TEMP; + info->func[1] &= ~PMBUS_HAVE_TEMP; + fallthrough; case raa_dmpvr2_2rail: info->pages = 2; info->read_word_data = raa_dmpvr2_read_word_data; @@ -304,7 +309,7 @@ static const struct i2c_device_id raa_dmpvr_id[] = { {"raa228000", raa_dmpvr2_hv}, {"raa228004", raa_dmpvr2_hv}, {"raa228006", raa_dmpvr2_hv}, - {"raa228228", raa_dmpvr2_2rail}, + {"raa228228", raa_dmpvr2_2rail_nontc}, {"raa229001", raa_dmpvr2_2rail}, {"raa229004", raa_dmpvr2_2rail}, {} diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c index e1d10a6b7f7c058e2972c27dd75fd74baa6b9ee5..a07b97400cba3571691cc6637a0721a18197daed 100644 --- a/drivers/hwmon/w83627hf.c +++ b/drivers/hwmon/w83627hf.c @@ -1213,7 +1213,7 @@ temp_type_store(struct device *dev, struct device_attribute *devattr, case W83781D_DEFAULT_BETA: dev_warn(dev, "Sensor type %d is deprecated, please use 4 " "instead\n", W83781D_DEFAULT_BETA); - /* fall through */ + fallthrough; case 4: /* thermistor */ tmp = w83627hf_read_value(data, W83781D_REG_SCFG1); w83627hf_write_value(data, W83781D_REG_SCFG1, diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c index 015f1ea319669eff7932daf49456a6638b9a7add..d833a4f16c47b85f15fc8b340d036c5ce25823de 100644 --- a/drivers/hwmon/w83781d.c +++ b/drivers/hwmon/w83781d.c @@ -814,7 +814,7 @@ store_sensor(struct device *dev, struct device_attribute *da, dev_warn(dev, "Sensor type %d is deprecated, please use 4 instead\n", W83781D_DEFAULT_BETA); - /* fall through */ + fallthrough; case 4: /* thermistor */ tmp = w83781d_read_value(data, W83781D_REG_SCFG1); w83781d_write_value(data, W83781D_REG_SCFG1, diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c index 44f68b965aec300f0e0b481d88b2426031d4334f..6d52b530b429dc899ca248b8e6beffbd88c35aad 100644 --- a/drivers/hwmon/w83795.c +++ b/drivers/hwmon/w83795.c @@ -2127,7 +2127,7 @@ static void w83795_apply_temp_config(struct w83795_data *data, u8 config, if (temp_chan >= 4) break; data->temp_mode |= 1 << temp_chan; - /* fall through */ + fallthrough; case 0x3: /* Thermistor */ data->has_temp |= 1 << temp_chan; break; diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c index 96544b348c273fe3ede7ac5e40547afd8b4322bc..7e642fb3ed15cc1713b0dbb7ffcbc9e93ab84404 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -346,10 +346,10 @@ static void debug_init_arch_data(void *info) switch (mode) { case EDDEVID_IMPL_FULL: drvdata->edvidsr_present = true; - /* Fall through */ + fallthrough; case EDDEVID_IMPL_EDPCSR_EDCIDSR: drvdata->edcidsr_present = true; - /* Fall through */ + fallthrough; case EDDEVID_IMPL_EDPCSR: /* * In ARM DDI 
0487A.k, the EDDEVID1.PCSROffset is used to diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 6d7d2169bfb26738a14bc4df8d4c79e406943635..96425e818fc20f86f26878fb8a3c76054716dc79 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1382,7 +1382,6 @@ static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd, return NOTIFY_BAD; break; case CPU_PM_EXIT: - /* fallthrough */ case CPU_PM_ENTER_FAILED: if (drvdata->state_needs_restore) etm4_cpu_restore(drvdata); diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 7040d583bed9e958d066539a277fec558dfee904..9ca3aaafcfbc272ff4e2db133cf2b031e87ec1be 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -84,9 +84,7 @@ u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata) */ switch (drvdata->memwidth) { case TMC_MEM_INTF_WIDTH_32BITS: - /* fallthrough */ case TMC_MEM_INTF_WIDTH_64BITS: - /* fallthrough */ case TMC_MEM_INTF_WIDTH_128BITS: mask = GENMASK(31, 4); break; diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c index a1529f571491d991aec1513ce007758077ca96b0..9ca8c4e045f8b6c95099d04a2131fe5955cb278d 100644 --- a/drivers/hwtracing/intel_th/sth.c +++ b/drivers/hwtracing/intel_th/sth.c @@ -84,11 +84,11 @@ static ssize_t notrace sth_stm_packet(struct stm_data *stm_data, /* Global packets (GERR, XSYNC, TRIG) are sent with register writes */ case STP_PACKET_GERR: reg += 4; - /* fall through */ + fallthrough; case STP_PACKET_XSYNC: reg += 8; - /* fall through */ + fallthrough; case STP_PACKET_TRIG: if (flags & STP_PACKET_TIMESTAMPED) diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index 710fbef9a9c2099f50a6d44a6ff583f4ae10cf43..384af88e58adaeb99cce13cb7f21cef812607280 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -41,8 +41,22 @@ static void pca_reset(struct i2c_algo_pca_data *adap) pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET); pca_outw(adap, I2C_PCA_IND, 0xA5); pca_outw(adap, I2C_PCA_IND, 0x5A); + + /* + * After a reset we need to re-apply any configuration + * (calculated in pca_init) to get the bus in a working state. 
+ */ + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode); + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow); + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi); + + pca_set_con(adap, I2C_PCA_CON_ENSIO); } else { adap->reset_chip(adap->data); + pca_set_con(adap, I2C_PCA_CON_ENSIO | adap->bus_settings.clock_freq); } } @@ -423,13 +437,14 @@ static int pca_init(struct i2c_adapter *adap) " Use the nominal frequency.\n", adap->name); } - pca_reset(pca_data); - clock = pca_clock(pca_data); printk(KERN_INFO "%s: Clock frequency is %dkHz\n", adap->name, freqs[clock]); - pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock); + /* Store settings as these will be needed when the PCA chip is reset */ + pca_data->bus_settings.clock_freq = clock; + + pca_reset(pca_data); } else { int clock; int mode; @@ -496,19 +511,15 @@ static int pca_init(struct i2c_adapter *adap) thi = tlow * min_thi / min_tlow; } + /* Store settings as these will be needed when the PCA chip is reset */ + pca_data->bus_settings.mode = mode; + pca_data->bus_settings.tlow = tlow; + pca_data->bus_settings.thi = thi; + pca_reset(pca_data); printk(KERN_INFO "%s: Clock frequency is %dHz\n", adap->name, clock * 100); - - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IMODE); - pca_outw(pca_data, I2C_PCA_IND, mode); - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLL); - pca_outw(pca_data, I2C_PCA_IND, tlow); - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLH); - pca_outw(pca_data, I2C_PCA_IND, thi); - - pca_set_con(pca_data, I2C_PCA_CON_ENSIO); } udelay(500); /* 500 us for oscillator to stabilise */ diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 688e92818821463fa97869059d54e071ef45de36..d8295b1c379d1b947fefde1e6a7ab5a6faebc395 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -720,7 +720,7 @@ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c, /* mark the last byte */ if (!process_call && (i == msg->len - 1)) - val |= 1 << M_TX_WR_STATUS_SHIFT; + val |= BIT(M_TX_WR_STATUS_SHIFT); iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val); } @@ -738,7 +738,7 @@ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c, */ addr = i2c_8bit_addr_from_msg(msg); /* mark it the last byte out */ - val = addr | (1 << M_TX_WR_STATUS_SHIFT); + val = addr | BIT(M_TX_WR_STATUS_SHIFT); iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val); } diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 75f07138a6fa25d19463c659dc3c7e886daf4dee..dfcf04e1967f1743db35b82a7c62532a1d57ac36 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -2093,8 +2093,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, } } - /* Adaptive TimeOut: astimated time in usec + 100% margin */ - timeout_usec = (2 * 10000 / bus->bus_freq) * (2 + nread + nwrite); + /* + * Adaptive TimeOut: estimated time in usec + 100% margin: + * 2: double the timeout for clock stretching case + * 9: bits per transaction (including the ack/nack) + */ + timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite); timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec)); if (nwrite >= 32 * 1024 || nread >= 32 * 1024) { dev_err(bus->dev, "i2c%d buffer too big\n", bus->num); diff --git a/drivers/i2c/busses/i2c-omap.c 
b/drivers/i2c/busses/i2c-omap.c index 175c590b93b76cbd72b8270ced91a78dad14ddae..12ac4212aded893fac85613c507454f3c1ebc506 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1425,7 +1425,6 @@ omap_i2c_probe(struct platform_device *pdev) major = OMAP_I2C_REV_SCHEME_0_MAJOR(omap->rev); break; case OMAP_I2C_SCHEME_1: - /* FALLTHROUGH */ default: omap->regs = (u8 *)reg_map_ip_v2; rev = (rev << 16) | diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c index 1c4c9bb06a0bfbff9ac054c21d56808dacdf0fe6..6eb0f50c5d2878d73e1395bfe0c8e209bceb7c07 100644 --- a/drivers/i2c/busses/i2c-opal.c +++ b/drivers/i2c/busses/i2c-opal.c @@ -125,7 +125,7 @@ static int i2c_opal_smbus_xfer(struct i2c_adapter *adap, u16 addr, case I2C_SMBUS_BYTE: req.buffer_ra = cpu_to_be64(__pa(&data->byte)); req.size = cpu_to_be32(1); - /* Fall through */ + fallthrough; case I2C_SMBUS_QUICK: req.type = (read_write == I2C_SMBUS_READ) ? OPAL_I2C_RAW_READ : OPAL_I2C_RAW_WRITE; diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 9e883474db8ceb3894c3fd789d4671e3379e5439..c7c543483b08caebb5395829d88434b7db15e100 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -590,6 +590,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) /* master sent stop */ if (ssr_filtered & SSR) { i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); + rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */ rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSSR, ~SSR & 0xff); } diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 2ade99b105b91e263aa14e0463a1098e844b25ce..e627d7b2790f75c5656bc9ef58afa823e8cbefbf 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -276,16 +276,6 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap) dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); } -const struct acpi_device_id * -i2c_acpi_match_device(const struct acpi_device_id *matches, - struct i2c_client *client) -{ - if (!(client && matches)) - return NULL; - - return acpi_match_device(matches, &client->dev); -} - static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = { /* * These Silead touchscreen controllers only work at 400KHz, for diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 34a9609f256daca9854f76458b69a7e4f091a98d..5ec082e2039d96ffb6d9c0aa54682bd5a3f64af3 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -480,7 +480,7 @@ static int i2c_device_probe(struct device *dev) * or ACPI ID table is supplied for the probing device. 
*/ if (!driver->id_table && - !i2c_acpi_match_device(dev->driver->acpi_match_table, client) && + !acpi_driver_match_device(dev, dev->driver) && !i2c_of_match_device(dev->driver->of_match_table, client)) { status = -ENODEV; goto put_sync_adapter; diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h index 94ff1693b3913f4b0717c03420a0e56a2d4666a3..8ce261167a2d3d9d7f7a8f87979a79fe62351cd6 100644 --- a/drivers/i2c/i2c-core.h +++ b/drivers/i2c/i2c-core.h @@ -59,20 +59,11 @@ static inline int __i2c_check_suspended(struct i2c_adapter *adap) } #ifdef CONFIG_ACPI -const struct acpi_device_id * -i2c_acpi_match_device(const struct acpi_device_id *matches, - struct i2c_client *client); void i2c_acpi_register_devices(struct i2c_adapter *adap); int i2c_acpi_get_irq(struct i2c_client *client); #else /* CONFIG_ACPI */ static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { } -static inline const struct acpi_device_id * -i2c_acpi_match_device(const struct acpi_device_id *matches, - struct i2c_client *client) -{ - return NULL; -} static inline int i2c_acpi_get_irq(struct i2c_client *client) { diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index 5c5306cd50ec3bdefe4e32e61d9df1c0677a538d..8513bd353c052b1b89405aff3d4bae918d3b0c1d 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -603,7 +603,7 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m) ret = dw_i2c_clk_cfg(master); if (ret) return ret; - /* fall through */ + fallthrough; case I3C_BUS_MODE_PURE: ret = dw_i3c_clk_cfg(master); if (ret) diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index fd3b5da44619b39852f7f538e3a40d70654c9569..50c9a41467c881e7966d41f390e1359333daac9c 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c @@ -575,14 +575,14 @@ static u8 hpt3xx_udma_filter(ide_drive_t *drive) if (!HPT370_ALLOW_ATA100_5 || check_in_drive_list(drive, bad_ata100_5)) return ATA_UDMA4; - /* fall through */ + fallthrough; case HPT372 : case HPT372A: case HPT372N: case HPT374 : if (ata_id_is_sata(drive->id)) mask &= ~0x0e; - /* fall through */ + fallthrough; default: return mask; } @@ -602,7 +602,7 @@ static u8 hpt3xx_mdma_filter(ide_drive_t *drive) case HPT374 : if (ata_id_is_sata(drive->id)) return 0x00; - /* fall through */ + fallthrough; default: return 0x07; } diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 7f17f830398884ec5bd03c2f4e5cee0b60af4934..212bb2d8bf346a9309c206b7faae6086d2045b06 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -350,7 +350,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) */ if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT) break; - /* fall-through */ + fallthrough; case DATA_PROTECT: /* * No point in retrying after an illegal request or data @@ -750,7 +750,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) case REQ_OP_DRV_IN: case REQ_OP_DRV_OUT: expiry = ide_cd_expiry; - /*FALLTHRU*/ + fallthrough; default: timeout = ATAPI_WAIT_PC; break; diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 1fe1f9d37a511c27c17f3376ff300b788269af04..af7503b47dbe3296d491cb2b246f82f0b9456e75 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -428,7 +428,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive) * (maintains previous driver behaviour) */ break; - /* fall through */ + fallthrough; case CAPACITY_CURRENT: /* Normal Zip/LS-120 disks */ if (memcmp(cap_desc, &floppy->cap_desc, 8)) diff --git 
a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index e867129466b058c4095aacb813f7c3fee6b3a7dd..1ddc45a04418cde357f71819b1e6a9318c35787c 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -143,7 +143,7 @@ static void ide_classify_atapi_dev(ide_drive_t *drive) } /* Early cdrom models used zero */ type = ide_cdrom; - /* fall through */ + fallthrough; case ide_cdrom: drive->dev_flags |= IDE_DFLAG_REMOVABLE; #ifdef CONFIG_PPC diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index a26f85ab58a95e813e79c03acfb672792af8ed13..d016cbe68cba17419ba279dc74dd041a8d11e288 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -129,7 +129,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd) return pre_task_out_intr(drive, cmd); } handler = task_pio_intr; - /* fall through */ + fallthrough; case ATA_PROT_NODATA: if (handler == NULL) handler = task_no_data_intr; @@ -141,7 +141,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd) hwif->expiry = dma_ops->dma_timer_expiry; ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD); dma_ops->dma_start(drive); - /* fall through */ + fallthrough; default: return ide_started; } @@ -579,10 +579,10 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) goto abort; } cmd.tf_flags |= IDE_TFLAG_MULTI_PIO; - /* fall through */ + fallthrough; case TASKFILE_OUT: cmd.protocol = ATA_PROT_PIO; - /* fall through */ + fallthrough; case TASKFILE_OUT_DMAQ: case TASKFILE_OUT_DMA: cmd.tf_flags |= IDE_TFLAG_WRITE; @@ -598,10 +598,10 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) goto abort; } cmd.tf_flags |= IDE_TFLAG_MULTI_PIO; - /* fall through */ + fallthrough; case TASKFILE_IN: cmd.protocol = ATA_PROT_PIO; - /* fall through */ + fallthrough; case TASKFILE_IN_DMAQ: case TASKFILE_IN_DMA: nsect = taskin / SECTOR_SIZE; diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c index 024bc7ba49ee5e8198b49a2f510ff05d0be70840..1a700bef6c565c2f5187e1b5a69a29c8b2d9882e 100644 --- a/drivers/ide/sis5513.c +++ b/drivers/ide/sis5513.c @@ -494,7 +494,7 @@ static int init_chipset_sis5513(struct pci_dev *dev) pci_read_config_byte(dev, 0x09, ®); if ((reg & 0x0f) != 0x00) pci_write_config_byte(dev, 0x09, reg&0xf0); - /* fall through */ + fallthrough; case ATA_16: /* force per drive recovery and active timings needed on ATA_33 and below chips */ diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 8e0fb1a5bdbd1e4fe81e6dfdef653d65f4bc28af..9a810e4a794603a1b28f86e7969d178e2463c447 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -89,14 +89,6 @@ static unsigned int mwait_substates __initdata; */ #define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15) -/* - * Set this flag for states where the HW flushes the TLB for us - * and so we don't need cross-calls to keep it consistent. - * If this flag is set, SW flushes the TLB, so even if the - * HW doesn't do the flushing, this flag is safe to use. - */ -#define CPUIDLE_FLAG_TLB_FLUSHED BIT(16) - /* * MWAIT takes an 8-bit "hint" in EAX "suggesting" * the C-state (top nibble) and sub-state (bottom nibble) @@ -131,14 +123,6 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev, unsigned long eax = flg2MWAIT(state->flags); unsigned long ecx = 1; /* break on interrupt flag */ bool tick; - int cpu = smp_processor_id(); - - /* - * leave_mm() to avoid costly and often unnecessary wakeups - * for flushing the user TLB's associated with the active mm. 
- */ - if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) - leave_mm(cpu); if (!static_cpu_has(X86_FEATURE_ARAT)) { /* diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 24864d9dfab5d09e274c5d9529676af300f58c78..48435865fdaf34387b563b9ce025e00fddedcc0b 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -189,6 +189,14 @@ struct bmc150_accel_data { struct mutex mutex; u8 fifo_mode, watermark; s16 buffer[8]; + /* + * Ensure there is sufficient space and correct alignment for + * the timestamp if enabled + */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; u8 bw_bits; u32 slope_dur; u32 slope_thres; @@ -922,15 +930,16 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev, * now. */ for (i = 0; i < count; i++) { - u16 sample[8]; int j, bit; j = 0; for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) - memcpy(&sample[j++], &buffer[i * 3 + bit], 2); + memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit], + sizeof(data->scan.channels[0])); - iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp); + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + tstamp); tstamp += sample_period; } diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index 66b2e4cf24cfab91f1a5a559d5f52fd97a58ce3b..0e18b92e20992a69fca70ffe585236f78e6cff8c 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -209,14 +209,20 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) const struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct kxsd9_state *st = iio_priv(indio_dev); + /* + * Ensure correct positioning and alignment of timestamp. + * No need to zero initialize as all elements are written. + */ + struct { + __be16 chan[4]; + s64 ts __aligned(8); + } hw_values; int ret; - /* 4 * 16bit values AND timestamp */ - __be16 hw_values[8]; ret = regmap_bulk_read(st->map, KXSD9_REG_X, - &hw_values, - 8); + hw_values.chan, + sizeof(hw_values.chan)); if (ret) { dev_err(st->dev, "error reading data\n"); @@ -224,7 +230,7 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, - hw_values, + &hw_values, iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c index 7e99bcb3398dff1356fbb8094f40eaa4d4437054..922bd38ff6ea971dcfb1e543314cb7969ba8b4e9 100644 --- a/drivers/iio/accel/mma7455_core.c +++ b/drivers/iio/accel/mma7455_core.c @@ -52,6 +52,14 @@ struct mma7455_data { struct regmap *regmap; + /* + * Used to reorganize data. 
Will ensure correct alignment of + * the timestamp if present + */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; }; static int mma7455_drdy(struct mma7455_data *mma7455) @@ -82,19 +90,19 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mma7455_data *mma7455 = iio_priv(indio_dev); - u8 buf[16]; /* 3 x 16-bit channels + padding + ts */ int ret; ret = mma7455_drdy(mma7455); if (ret) goto done; - ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, buf, - sizeof(__le16) * 3); + ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, + mma7455->scan.channels, + sizeof(mma7455->scan.channels)); if (ret) goto done; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan, iio_get_time_ns(indio_dev)); done: diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index ba27f86731314dc770048160430265d05332db7c..853febc294885e0ae5a2a92c76c526bd2e9b4158 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -110,6 +110,12 @@ struct mma8452_data { int sleep_val; struct regulator *vdd_reg; struct regulator *vddio_reg; + + /* Ensure correct alignment of time stamp when present */ + struct { + __be16 channels[3]; + s64 ts __aligned(8); + } buffer; }; /** @@ -1091,14 +1097,13 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mma8452_data *data = iio_priv(indio_dev); - u8 buffer[16]; /* 3 16-bit channels + padding + ts */ int ret; - ret = mma8452_read(data, (__be16 *)buffer); + ret = mma8452_read(data, data->buffer.channels); if (ret < 0) goto done; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, + iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, iio_get_time_ns(indio_dev)); done: @@ -1580,7 +1585,7 @@ static int mma8452_probe(struct i2c_client *client, case FXLS8471_DEVICE_ID: if (ret == data->chip_info->chip_id) break; - /* fall through */ + fallthrough; default: ret = -ENODEV; goto disable_regulators; diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 66d9cc07315735f45a716b3dcda0f45804b889d4..d94dc800b8422ef7d8d2bbb086805a9659619adb 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -865,6 +865,8 @@ config ROCKCHIP_SARADC tristate "Rockchip SARADC driver" depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST) depends on RESET_CONTROLLER + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say yes here to build support for the SARADC found in SoCs from Rockchip. 
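A recurring pattern in the IIO hunks above and below replaces a bare on-stack scratch array (e.g. u16 buf[8]) with a driver-private struct whose s64 timestamp member carries __aligned(8), so that iio_push_to_buffers_with_timestamp() always writes the timestamp at a naturally aligned offset. The following is a minimal sketch of that layout, with kernel types stubbed via stdint so it builds as plain C11; "example_scan" is an illustrative name, not code from these patches:

/* Sketch of the scan-buffer layout used by these IIO alignment fixes.
 * Kernel types are stubbed so this compiles in userspace;
 * struct example_scan is a hypothetical stand-in.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef int16_t s16;
typedef int64_t s64;
#define __aligned(x) __attribute__((aligned(x)))

struct example_scan {
	s16 channels[3];	/* 6 bytes of sample data... */
	s64 ts __aligned(8);	/* ...2 bytes of padding, timestamp at offset 8 */
};

int main(void)
{
	/* The attribute makes the compiler pad the struct so the
	 * s64 timestamp is naturally (8-byte) aligned. */
	static_assert(offsetof(struct example_scan, ts) % 8 == 0,
		      "timestamp must be 8-byte aligned");
	printf("ts offset = %zu, size = %zu\n",
	       offsetof(struct example_scan, ts),
	       sizeof(struct example_scan));
	return 0;
}

With three s16 channels the timestamp lands at offset 8 and the struct totals 16 bytes, the same space the old u16 buf[8] scratch buffers reserved, but now with the alignment guaranteed by the type rather than by luck of the stack layout.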
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c index 7fdc5d2d1d35c152cb4d3f5fe6484a6a381a0c68..1bb987a4acbabc7b1c41f8e411323fb9d8813297 100644 --- a/drivers/iio/adc/ab8500-gpadc.c +++ b/drivers/iio/adc/ab8500-gpadc.c @@ -484,7 +484,7 @@ static int ab8500_gpadc_read(struct ab8500_gpadc *gpadc, delay_max = 10000; /* large range optimises sleepmode */ break; } - /* Fall through */ + fallthrough; default: ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA; break; diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c index 84a1733e5913f438c53538cb18817d6283fb18cd..64c3cc3823110239c534d35302ba621dfcae3895 100644 --- a/drivers/iio/adc/cpcap-adc.c +++ b/drivers/iio/adc/cpcap-adc.c @@ -690,7 +690,7 @@ static void cpcap_adc_phase(struct cpcap_adc_request *req) break; case CPCAP_ADC_BATTI_PI17: index = req->bank_index; - /* fallthrough */ + fallthrough; default: req->result += conv_tbl[index].cal_offset; req->result += conv_tbl[index].align_offset; diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index 5ed63e8742923fa6a053904be7eeedfb1c1951c1..b573ec60a8b8fea1d1773709b22f2318aecca1a6 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -146,6 +146,11 @@ struct ina2xx_chip_info { int range_vbus; /* Bus voltage maximum in V */ int pga_gain_vshunt; /* Shunt voltage PGA gain */ bool allow_async_readout; + /* data buffer needs space for channel data and timestamp */ + struct { + u16 chan[4]; + u64 ts __aligned(8); + } scan; }; static const struct ina2xx_config ina2xx_config[] = { @@ -738,8 +743,6 @@ static int ina2xx_conversion_ready(struct iio_dev *indio_dev) static int ina2xx_work_buffer(struct iio_dev *indio_dev) { struct ina2xx_chip_info *chip = iio_priv(indio_dev); - /* data buffer needs space for channel data and timestap */ - unsigned short data[4 + sizeof(s64)/sizeof(short)]; int bit, ret, i = 0; s64 time; @@ -758,10 +761,10 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) if (ret < 0) return ret; - data[i++] = val; + chip->scan.chan[i++] = val; } - iio_push_to_buffers_with_timestamp(indio_dev, data, time); + iio_push_to_buffers_with_timestamp(indio_dev, &chip->scan, time); return 0; }; diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c index 01b20e420ac4563719ab3ab623611f7f33a5aa85..6efb0b43d9389eec57f0dc345ffa54f8587f8b61 100644 --- a/drivers/iio/adc/max1118.c +++ b/drivers/iio/adc/max1118.c @@ -36,6 +36,11 @@ struct max1118 { struct spi_device *spi; struct mutex lock; struct regulator *reg; + /* Ensure natural alignment of buffer elements */ + struct { + u8 channels[2]; + s64 ts __aligned(8); + } scan; u8 data ____cacheline_aligned; }; @@ -166,7 +171,6 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct max1118 *adc = iio_priv(indio_dev); - u8 data[16] = { }; /* 2x 8-bit ADC data + padding + 8 bytes timestamp */ int scan_index; int i = 0; @@ -184,10 +188,10 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) goto out; } - data[i] = ret; + adc->scan.channels[i] = ret; i++; } - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, iio_get_time_ns(indio_dev)); out: mutex_unlock(&adc->lock); diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index 5f1706d1c3c08a66fdc45f6e2d4f73691f6f99c3..da353dcb1e9d4988aeec45731f5a6b26acbf8d82 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c @@ -96,16 +96,12 
@@ static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig) { int ret; - mutex_lock(&adc->lock); - ret = i2c_master_send(adc->i2c, &newconfig, 1); if (ret > 0) { adc->config = newconfig; ret = 0; } - mutex_unlock(&adc->lock); - return ret; } @@ -138,6 +134,8 @@ static int mcp3422_read_channel(struct mcp3422 *adc, u8 config; u8 req_channel = channel->channel; + mutex_lock(&adc->lock); + if (req_channel != MCP3422_CHANNEL(adc->config)) { config = adc->config; config &= ~MCP3422_CHANNEL_MASK; @@ -145,12 +143,18 @@ static int mcp3422_read_channel(struct mcp3422 *adc, config &= ~MCP3422_PGA_MASK; config |= MCP3422_PGA_VALUE(adc->pga[req_channel]); ret = mcp3422_update_config(adc, config); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&adc->lock); return ret; + } msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]); } - return mcp3422_read(adc, value, &config); + ret = mcp3422_read(adc, value, &config); + + mutex_unlock(&adc->lock); + + return ret; } static int mcp3422_read_raw(struct iio_dev *iio, diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index 93c2252c0b89017f9d39bbd9f4faf6fbb526baf0..1a9189ba69aecec180d66e2528000f87ae1de418 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -707,7 +707,7 @@ static int meson_sar_adc_temp_sensor_init(struct iio_dev *indio_dev) size_t read_len; int ret; - temperature_calib = devm_nvmem_cell_get(&indio_dev->dev, + temperature_calib = devm_nvmem_cell_get(indio_dev->dev.parent, "temperature_calib"); if (IS_ERR(temperature_calib)) { ret = PTR_ERR(temperature_calib); diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c index 9426f70a800598c0f4010708f332e8e5410a6c13..cf63983a54d99b6610fa939c06f3185a2a8f3a6a 100644 --- a/drivers/iio/adc/ti-adc081c.c +++ b/drivers/iio/adc/ti-adc081c.c @@ -33,6 +33,12 @@ struct adc081c { /* 8, 10 or 12 */ int bits; + + /* Ensure natural alignment of buffer elements */ + struct { + u16 channel; + s64 ts __aligned(8); + } scan; }; #define REG_CONV_RES 0x00 @@ -128,14 +134,13 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adc081c *data = iio_priv(indio_dev); - u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */ int ret; ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES); if (ret < 0) goto out; - buf[0] = ret; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + data->scan.channel = ret; + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); out: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c index 9017e1e24273b3bb9fae7a2442164546b2e82bae..dfba34834a575f9d64d338e3ecb9a027673dc131 100644 --- a/drivers/iio/adc/ti-adc084s021.c +++ b/drivers/iio/adc/ti-adc084s021.c @@ -26,6 +26,11 @@ struct adc084s021 { struct spi_transfer spi_trans; struct regulator *reg; struct mutex lock; + /* Buffer used to align data */ + struct { + __be16 channels[4]; + s64 ts __aligned(8); + } scan; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache line. 
@@ -141,14 +146,13 @@ static irqreturn_t adc084s021_buffer_trigger_handler(int irq, void *pollfunc) struct iio_poll_func *pf = pollfunc; struct iio_dev *indio_dev = pf->indio_dev; struct adc084s021 *adc = iio_priv(indio_dev); - __be16 data[8] = {0}; /* 4 * 16-bit words of data + 8 bytes timestamp */ mutex_lock(&adc->lock); - if (adc084s021_adc_conversion(adc, &data) < 0) + if (adc084s021_adc_conversion(adc, adc->scan.channels) < 0) dev_err(&adc->spi->dev, "Failed to read data\n"); - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, iio_get_time_ns(indio_dev)); mutex_unlock(&adc->lock); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index f42ab112986ec0aa04f9ae43a7676aa1f7cd57b1..9fef39bcf997b90be945c25942489aa6f2842333 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -316,6 +316,7 @@ static const struct iio_chan_spec ads1115_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), }; +#ifdef CONFIG_PM static int ads1015_set_power_state(struct ads1015_data *data, bool on) { int ret; @@ -333,6 +334,15 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on) return ret < 0 ? ret : 0; } +#else /* !CONFIG_PM */ + +static int ads1015_set_power_state(struct ads1015_data *data, bool on) +{ + return 0; +} + +#endif /* !CONFIG_PM */ + static int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) { diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index 2b007e7568b21b72d51a47614e7498269cc14c18..60dd87e96f5f8b0049f9b3af4e7be6e82b3b0ab8 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -78,6 +78,11 @@ struct ccs811_data { struct iio_trigger *drdy_trig; struct gpio_desc *wakeup_gpio; bool drdy_trig_on; + /* Ensures correct alignment of timestamp if present */ + struct { + s16 channels[2]; + s64 ts __aligned(8); + } scan; }; static const struct iio_chan_spec ccs811_channels[] = { @@ -327,17 +332,17 @@ static irqreturn_t ccs811_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct ccs811_data *data = iio_priv(indio_dev); struct i2c_client *client = data->client; - s16 buf[8]; /* s16 eCO2 + s16 TVOC + padding + 8 byte timestamp */ int ret; - ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, 4, - (u8 *)&buf); + ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, + sizeof(data->scan.channels), + (u8 *)data->scan.channels); if (ret != 4) { dev_err(&client->dev, "cannot read sensor data\n"); goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); err: diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c index 5a29e32c295fc9c3e60ced52121e02e3463bdf7f..2ea9a5c4d84629c14f92ee2698726b7ea18d3e78 100644 --- a/drivers/iio/chemical/sps30.c +++ b/drivers/iio/chemical/sps30.c @@ -118,7 +118,7 @@ static int sps30_do_cmd(struct sps30_state *state, u16 cmd, u8 *data, int size) case SPS30_READ_AUTO_CLEANING_PERIOD: buf[0] = SPS30_AUTO_CLEANING_PERIOD >> 8; buf[1] = (u8)(SPS30_AUTO_CLEANING_PERIOD & 0xff); - /* fall through */ + fallthrough; case SPS30_READ_DATA_READY_FLAG: case SPS30_READ_DATA: case SPS30_READ_SERIAL: diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index 
ea480c1d4349ec0789044cb8dc398a2883078422..1bc6efa47316351571d6a233ec0c802e6b4b87b5 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -72,10 +72,13 @@ static void get_default_min_max_freq(enum motionsensor_type type, switch (type) { case MOTIONSENSE_TYPE_ACCEL: - case MOTIONSENSE_TYPE_GYRO: *min_freq = 12500; *max_freq = 100000; break; + case MOTIONSENSE_TYPE_GYRO: + *min_freq = 25000; + *max_freq = 100000; + break; case MOTIONSENSE_TYPE_MAG: *min_freq = 5000; *max_freq = 25000; diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index cc4875660a69f4889233eb7ae6346221592913cb..1fd75c02a7cd7248444f3abc8bc3ad5953e3f7bd 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c @@ -220,7 +220,6 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st) break; case CH_MODE_UNUSED: - /* fall-through */ default: switch (st->channel_offstate[i]) { case CH_OFFSTATE_OUT_TRISTATE: @@ -237,7 +236,6 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st) break; case CH_OFFSTATE_PULLDOWN: - /* fall-through */ default: pulldown |= BIT(i); break; diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c index b3835fb6b86266a85a6a5be4736b351369978ba4..1a9609eda5c5ca047cfb32ff0be569af98c43282 100644 --- a/drivers/iio/dac/dpot-dac.c +++ b/drivers/iio/dac/dpot-dac.c @@ -74,11 +74,12 @@ static int dpot_dac_read_raw(struct iio_dev *indio_dev, case IIO_VAL_INT: /* * Convert integer scale to fractional scale by - * setting the denominator (val2) to one, and... + * setting the denominator (val2) to one... */ *val2 = 1; ret = IIO_VAL_FRACTIONAL; - /* fall through */ + /* ...and fall through. Say it again for GCC. */ + fallthrough; case IIO_VAL_FRACTIONAL: *val *= regulator_get_voltage(dac->vref) / 1000; *val2 *= dac->max_ohms; diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c index 9b47d9472a4fde84325fabb0b5cc1b22e5fe82f5..d9b2ed80882a43a7bd78f4af80522916bccbb43e 100644 --- a/drivers/iio/health/max30102.c +++ b/drivers/iio/health/max30102.c @@ -273,10 +273,10 @@ static int max30102_read_measurement(struct max30102_data *data, switch (measurements) { case 3: MAX30102_COPY_DATA(2); - /* fall through */ + fallthrough; case 2: MAX30102_COPY_DATA(1); - /* fall through */ + fallthrough; case 1: MAX30102_COPY_DATA(0); break; diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c index c539dfa3b8d3ed06f6c848ad5f3b431efe7108e1..319b64b2fd8879ea6b113050540e9f963738b8a3 100644 --- a/drivers/iio/imu/adis.c +++ b/drivers/iio/imu/adis.c @@ -97,11 +97,11 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, adis->tx[9] = (value >> 24) & 0xff; adis->tx[6] = ADIS_WRITE_REG(reg + 2); adis->tx[7] = (value >> 16) & 0xff; - /* fall through */ + fallthrough; case 2: adis->tx[4] = ADIS_WRITE_REG(reg + 1); adis->tx[5] = (value >> 8) & 0xff; - /* fall through */ + fallthrough; case 1: adis->tx[2] = ADIS_WRITE_REG(reg); adis->tx[3] = value & 0xff; @@ -191,7 +191,7 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, adis->tx[2] = ADIS_READ_REG(reg + 2); adis->tx[3] = 0; spi_message_add_tail(&xfers[1], &msg); - /* fall through */ + fallthrough; case 2: adis->tx[4] = ADIS_READ_REG(reg); adis->tx[5] = 0; diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 606d5e61c5759970689db730f4a240c786458263..cdcd16f19500a28bff6960e426b82e6b5f1a5bc5 100644 --- a/drivers/iio/industrialio-core.c +++ 
b/drivers/iio/industrialio-core.c @@ -599,7 +599,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type, return scnprintf(buf, len, "%d", vals[0]); case IIO_VAL_INT_PLUS_MICRO_DB: scale_db = true; - /* fall through */ + fallthrough; case IIO_VAL_INT_PLUS_MICRO: if (vals[1] < 0) return scnprintf(buf, len, "-%d.%06u%s", abs(vals[0]), @@ -918,7 +918,7 @@ static ssize_t iio_write_channel_info(struct device *dev, break; case IIO_VAL_INT_PLUS_MICRO_DB: scale_db = true; - /* fall through */ + fallthrough; case IIO_VAL_INT_PLUS_MICRO: fract_mult = 100000; break; diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 4bac0646398d569752d869dbe5e342265ad2e39c..b4323d2db0b19b53a9405de2e3d54a875f7059e4 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -1243,13 +1243,16 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ltr501_data *data = iio_priv(indio_dev); - u16 buf[8]; + struct { + u16 channels[3]; + s64 ts __aligned(8); + } scan; __le16 als_buf[2]; u8 mask = 0; int j = 0; int ret, psdata; - memset(buf, 0, sizeof(buf)); + memset(&scan, 0, sizeof(scan)); /* figure out which data needs to be ready */ if (test_bit(0, indio_dev->active_scan_mask) || @@ -1268,9 +1271,9 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) if (ret < 0) return ret; if (test_bit(0, indio_dev->active_scan_mask)) - buf[j++] = le16_to_cpu(als_buf[1]); + scan.channels[j++] = le16_to_cpu(als_buf[1]); if (test_bit(1, indio_dev->active_scan_mask)) - buf[j++] = le16_to_cpu(als_buf[0]); + scan.channels[j++] = le16_to_cpu(als_buf[0]); } if (mask & LTR501_STATUS_PS_RDY) { @@ -1278,10 +1281,10 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) &psdata, 2); if (ret < 0) goto done; - buf[j++] = psdata & LTR501_PS_DATA_MASK; + scan.channels[j++] = psdata & LTR501_PS_DATA_MASK; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); done: diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c index aa8ed1e3e89a5165f50402674148c0e7191699a4..b8e721bced5bae12d1816cb75933215c767c7bb0 100644 --- a/drivers/iio/light/max44000.c +++ b/drivers/iio/light/max44000.c @@ -75,6 +75,11 @@ struct max44000_data { struct mutex lock; struct regmap *regmap; + /* Ensure naturally aligned timestamp */ + struct { + u16 channels[2]; + s64 ts __aligned(8); + } scan; }; /* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */ @@ -488,7 +493,6 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct max44000_data *data = iio_priv(indio_dev); - u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */ int index = 0; unsigned int regval; int ret; @@ -498,17 +502,17 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) ret = max44000_read_alsval(data); if (ret < 0) goto out_unlock; - buf[index++] = ret; + data->scan.channels[index++] = ret; } if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) { ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, ®val); if (ret < 0) goto out_unlock; - buf[index] = regval; + data->scan.channels[index] = regval; } mutex_unlock(&data->lock); - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); return 
IRQ_HANDLED; diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c index 155faaea8c72faafb7d8be6659c20c33f7bb242f..8f5f857c2e7d9ecdc67964e3c1412367e740837a 100644 --- a/drivers/iio/light/si1145.c +++ b/drivers/iio/light/si1145.c @@ -1042,7 +1042,7 @@ static int si1145_initialize(struct si1145_data *data) SI1145_LED_CURRENT_45mA); if (ret < 0) return ret; - /* fallthrough */ + fallthrough; case 2: ret = i2c_smbus_write_byte_data(client, SI1145_REG_PS_LED21, diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index 6a8ae145f0c00050c8943bb30d1eea3f8545796d..cbb44e401c0a9e176ea76b1b437c174f2b93d631 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -499,7 +499,7 @@ static int ak8974_detect(struct ak8974 *ak8974) switch (whoami) { case AK8974_WHOAMI_VALUE_AMI306: name = "ami306"; - /* fall-through */ + fallthrough; case AK8974_WHOAMI_VALUE_AMI305: ret = regmap_read(ak8974->map, AMI305_VER, &fw); if (ret) diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index 03d71f7961772879074b4c6cf355a1f144d77e28..623766ff800b64b3f9db434dfa4ed00a93c53b10 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -366,6 +366,12 @@ struct ak8975_data { struct iio_mount_matrix orientation; struct regulator *vdd; struct regulator *vid; + + /* Ensure natural alignment of timestamp */ + struct { + s16 channels[3]; + s64 ts __aligned(8); + } scan; }; /* Enable attached power regulator if any. */ @@ -793,7 +799,6 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) const struct i2c_client *client = data->client; const struct ak_def *def = data->def; int ret; - s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */ __le16 fval[3]; mutex_lock(&data->lock); @@ -816,12 +821,13 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) mutex_unlock(&data->lock); /* Clamp to valid range. 
*/ - buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); - buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); - buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); + data->scan.channels[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); + data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); + data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); - iio_push_to_buffers_with_timestamp(indio_dev, buff, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); + return; unlock: diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c index 654564c452489cbe62b6f6f1eacac10452896916..ad4b1fb2607aabd6a13e574c730f01c223034f50 100644 --- a/drivers/iio/proximity/mb1232.c +++ b/drivers/iio/proximity/mb1232.c @@ -40,6 +40,11 @@ struct mb1232_data { */ struct completion ranging; int irqnr; + /* Ensure correct alignment of data to push to IIO buffer */ + struct { + s16 distance; + s64 ts __aligned(8); + } scan; }; static irqreturn_t mb1232_handle_irq(int irq, void *dev_id) @@ -113,17 +118,13 @@ static irqreturn_t mb1232_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mb1232_data *data = iio_priv(indio_dev); - /* - * triggered buffer - * 16-bit channel + 48-bit padding + 64-bit timestamp - */ - s16 buffer[8] = { 0 }; - buffer[0] = mb1232_read_distance(data); - if (buffer[0] < 0) + data->scan.distance = mb1232_read_distance(data); + if (data->scan.distance < 0) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index dc0558b23158a015f2420308c0b0bc32c45b8ff4..fbc28f1a8b9291f69e7372b1ef576e72eae71325 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3034,7 +3034,7 @@ static int cm_rej_handler(struct cm_work *work) case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); - /* fall through */ + fallthrough; case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN) @@ -3044,7 +3044,7 @@ static int cm_rej_handler(struct cm_work *work) break; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); - /* fall through */ + fallthrough; case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: cm_enter_timewait(cm_id_priv); @@ -3058,7 +3058,7 @@ static int cm_rej_handler(struct cm_work *work) cm_enter_timewait(cm_id_priv); break; } - /* fall through */ + fallthrough; default: pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n", __func__, be32_to_cpu(cm_id_priv->id.local_id), @@ -3116,7 +3116,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id, msg_response = CM_MSG_RESPONSE_OTHER; break; } - /* fall through */ + fallthrough; default: pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n", __func__, be32_to_cpu(cm_id_priv->id.local_id), @@ -3227,7 +3227,7 @@ static int cm_mra_handler(struct cm_work *work) case IB_CM_MRA_REP_RCVD: atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
counter[CM_MRA_COUNTER]); - /* fall through */ + fallthrough; default: pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n", __func__, be32_to_cpu(cm_id_priv->id.local_id), @@ -4214,7 +4214,7 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, qp_attr->retry_cnt = cm_id_priv->retry_count; qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; qp_attr->max_rd_atomic = cm_id_priv->initiator_depth; - /* fall through */ + fallthrough; case IB_QPT_XRC_TGT: *qp_attr_mask |= IB_QP_TIMEOUT; qp_attr->timeout = cm_id_priv->av.timeout; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 26de0dab60bbba00e7896d1d8261213baa1af843..7f0e91e92968da394c720bfcc5e2d20b4e3675e0 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1985,7 +1985,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, event.event = RDMA_CM_EVENT_ESTABLISHED; break; case IB_CM_DREQ_ERROR: - event.status = -ETIMEDOUT; /* fall through */ + event.status = -ETIMEDOUT; + fallthrough; case IB_CM_DREQ_RECEIVED: case IB_CM_DREP_RECEIVED: if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index 513825e424bffa57efe7a4f47f327961b5e7ba1f..a92fc3f90bb5b37985f17bc0e22b10267cfb17bf 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -379,7 +379,7 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes, { LIST_HEAD(tmp_list); unsigned int nr_cqs, i; - struct ib_cq *cq; + struct ib_cq *cq, *n; int ret; if (poll_ctx > IB_POLL_LAST_POOL_TYPE) { @@ -412,7 +412,7 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes, return 0; out_free_cqs: - list_for_each_entry(cq, &tmp_list, pool_entry) { + list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) { cq->shared = false; ib_free_cq(cq); } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index ef0cd2998671922f064207e9a95335a2eb160177..c36b4d2b61e0c044088f18c3a2df287666db1864 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2751,7 +2751,7 @@ static int __init ib_core_init(void) ret = addr_init(); if (ret) { - pr_warn("Could't init IB address resolution\n"); + pr_warn("Couldn't init IB address resolution\n"); goto err_ibnl; } diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index 614cff89fc719116cd6ec52fd2f2c0acc3713cb2..13f43ab7220b05ac3438869810a4ddf99e723294 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -510,7 +510,6 @@ struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, switch (ctx->type) { case RDMA_RW_SIG_MR: case RDMA_RW_MR: - /* fallthrough */ for (i = 0; i < ctx->nr_ops; i++) { rdma_rw_update_lkey(&ctx->reg[i], ctx->reg[i].wr.wr.opcode != diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index d03dacaef78805ddcb35d8f97e8417528fe9cc58..1d184ea05eba1cd7071bb09e35306d34a0f66e47 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -794,7 +794,7 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); - /* fall through */ + fallthrough; case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); @@ -820,7 +820,7 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp, case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); - /* fall through */ + fallthrough; 
case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index ef04a261097fca2a6e2927496b2a728c1d8f6653..e47c5949013f3ada403dee00378ef413c4edb6ef 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -259,7 +259,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle, return -EOPNOTSUPP; e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id; - /* fall through */ + fallthrough; case UVERBS_ATTR_TYPE_PTR_IN: /* Ensure that any data provided by userspace beyond the known * struct is zero. Userspace that knows how to use some future @@ -271,7 +271,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle, !uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len)) return -EOPNOTSUPP; - /* fall through */ + fallthrough; case UVERBS_ATTR_TYPE_PTR_OUT: if (uattr->len < val_spec->u.ptr.min_len || (!val_spec->zero_trailing && diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 3096e73797b775f706c4d257fd140024c9031ebe..307886737646ea73d37af36170a75987cb83ca02 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1801,7 +1801,7 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width) dev_put(netdev); - if (!rc) { + if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) { netdev_speed = lksettings.base.speed; } else { netdev_speed = SPEED_1000; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 3f18efc0c29744507d3119bc81438b58f3aa067f..1d7a9ca5240c5b121bcccb0973b7e70038f4e562 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -752,12 +752,6 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) gsi_sqp = rdev->gsi_ctx.gsi_sqp; gsi_sah = rdev->gsi_ctx.gsi_sah; - /* remove from active qp list */ - mutex_lock(&rdev->qp_lock); - list_del(&gsi_sqp->list); - mutex_unlock(&rdev->qp_lock); - atomic_dec(&rdev->qp_count); - ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n"); bnxt_qplib_destroy_ah(&rdev->qplib_res, &gsi_sah->qplib_ah, @@ -772,6 +766,12 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) } bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp); + /* remove from active qp list */ + mutex_lock(&rdev->qp_lock); + list_del(&gsi_sqp->list); + mutex_unlock(&rdev->qp_lock); + atomic_dec(&rdev->qp_count); + kfree(rdev->gsi_ctx.sqp_tbl); kfree(gsi_sah); kfree(gsi_sqp); @@ -792,11 +792,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) unsigned int flags; int rc; - mutex_lock(&rdev->qp_lock); - list_del(&qp->list); - mutex_unlock(&rdev->qp_lock); - atomic_dec(&rdev->qp_count); - bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); @@ -819,6 +814,11 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) goto sh_fail; } + mutex_lock(&rdev->qp_lock); + list_del(&qp->list); + mutex_unlock(&rdev->qp_lock); + atomic_dec(&rdev->qp_count); + ib_umem_release(qp->rumem); ib_umem_release(qp->sumem); @@ -2657,7 +2657,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, default: break; } - /* fall through */ + fallthrough; case IB_WR_SEND_WITH_INV: rc = bnxt_re_build_send_wqe(qp, wr, &wqe); break; @@ -3264,6 +3264,19 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, wc->wc_flags |= IB_WC_GRH; } +static bool 
bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, + u16 vlan_id) +{ + /* + * Check if the vlan is configured in the host. If not configured, it + * can be a transparent VLAN. So don't report the vlan id. + */ + if (!__vlan_find_dev_deep_rcu(rdev->netdev, + htons(ETH_P_8021Q), vlan_id)) + return false; + return true; +} + static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe, u16 *vid, u8 *sl) { @@ -3332,9 +3345,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, wc->src_qp = orig_cqe->src_qp; memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { - wc->vlan_id = vlan_id; - wc->sl = sl; - wc->wc_flags |= IB_WC_WITH_VLAN; + if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { + wc->vlan_id = vlan_id; + wc->sl = sl; + wc->wc_flags |= IB_WC_WITH_VLAN; + } } wc->port_num = 1; wc->vendor_err = orig_cqe->status; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index dad0df8a2467993aca1dac4d782bf2045292e62e..53aee5a42ab85074d356d9f4aa7ee86a421a8687 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -821,7 +821,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, struct ib_event event; unsigned int flags; - if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR && + rdma_is_kernel_res(&qp->ib_qp.res)) { flags = bnxt_re_lock_cqs(qp); bnxt_qplib_add_flush_qp(&qp->qplib_qp); bnxt_re_unlock_cqs(qp, flags); @@ -1008,7 +1009,6 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev) static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) { struct bnxt_re_ring_attr rattr = {}; - struct bnxt_qplib_ctx *qplib_ctx; int num_vec_created = 0; int rc = 0, i; u8 type; @@ -1031,13 +1031,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) if (rc) goto dealloc_res; - qplib_ctx = &rdev->qplib_ctx; for (i = 0; i < rdev->num_msix - 1; i++) { struct bnxt_qplib_nq *nq; nq = &rdev->nq[i]; - nq->hwq.max_elements = (qplib_ctx->cq_count + - qplib_ctx->srqc_count + 2); + nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT; rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]); if (rc) { ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x", diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 117b42349a283683c7ea006628a93f71ee42e9dd..f78da54a0bc50fc9814c346c216db1ea23387508 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -818,6 +818,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) u16 cmd_flags = 0; u32 qp_flags = 0; u8 pg_sz_lvl; + u32 tbl_indx; int rc; RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags); @@ -907,8 +908,9 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) rq->dbinfo.db = qp->dpi->dbr; rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size); } - rcfw->qp_tbl[qp->id].qp_id = qp->id; - rcfw->qp_tbl[qp->id].qp_handle = (void *)qp; + tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); + rcfw->qp_tbl[tbl_indx].qp_id = qp->id; + rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp; return 0; @@ -935,10 +937,10 @@ static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size) sq = &qp->sq; hwq = &sq->hwq; + /* First psn entry */ fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg); if (!IS_ALIGNED(fpsne, PAGE_SIZE)) - indx_pad = ALIGN(fpsne, PAGE_SIZE) / size; - + indx_pad = (fpsne & ~PAGE_MASK) / 
size; hwq->pad_pgofft = indx_pad; hwq->pad_pg = (u64 *)psn_pg; hwq->pad_stride = size; @@ -959,6 +961,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) u16 cmd_flags = 0; u32 qp_flags = 0; u8 pg_sz_lvl; + u32 tbl_indx; u16 nsge; RCFW_CMD_PREP(req, CREATE_QP, cmd_flags); @@ -1111,8 +1114,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) rq->dbinfo.db = qp->dpi->dbr; rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size); } - rcfw->qp_tbl[qp->id].qp_id = qp->id; - rcfw->qp_tbl[qp->id].qp_handle = (void *)qp; + tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); + rcfw->qp_tbl[tbl_indx].qp_id = qp->id; + rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp; return 0; fail: @@ -1457,10 +1461,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct cmdq_destroy_qp req; struct creq_destroy_qp_resp resp; u16 cmd_flags = 0; + u32 tbl_indx; int rc; - rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID; - rcfw->qp_tbl[qp->id].qp_handle = NULL; + tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); + rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID; + rcfw->qp_tbl[tbl_indx].qp_handle = NULL; RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); @@ -1468,8 +1474,8 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL, 0); if (rc) { - rcfw->qp_tbl[qp->id].qp_id = qp->id; - rcfw->qp_tbl[qp->id].qp_handle = qp; + rcfw->qp_tbl[tbl_indx].qp_id = qp->id; + rcfw->qp_tbl[tbl_indx].qp_handle = qp; return rc; } @@ -1779,7 +1785,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, break; } - /* fall thru */ + fallthrough; case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM: case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV: { diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 4e211162acee2ff167378eb3485f104befc5b398..f7736e34ac64c9c2ad4e69dd26f6a991e19ea6dc 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -307,14 +307,15 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, __le16 mcookie; u16 cookie; int rc = 0; - u32 qp_id; + u32 qp_id, tbl_indx; pdev = rcfw->pdev; switch (qp_event->event) { case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: err_event = (struct creq_qp_error_notification *)qp_event; qp_id = le32_to_cpu(err_event->xid); - qp = rcfw->qp_tbl[qp_id].qp_handle; + tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw); + qp = rcfw->qp_tbl[tbl_indx].qp_handle; dev_dbg(&pdev->dev, "Received QP error notification\n"); dev_dbg(&pdev->dev, "qpid 0x%x, req_err=0x%x, resp_err=0x%x\n", @@ -615,8 +616,9 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, cmdq->bmap_size = bmap_size; - rcfw->qp_tbl_size = qp_tbl_sz; - rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node), + /* Allocate one extra to hold the QP1 entries */ + rcfw->qp_tbl_size = qp_tbl_sz + 1; + rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node), GFP_KERNEL); if (!rcfw->qp_tbl) goto fail; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 157387636d004432a4b5b1780f2a79dab22ab381..5f2f0a5a3560ff89902a04d3390f303c161e7e4a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -216,4 +216,9 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_ctx *ctx, int is_virtfn); 
void bnxt_qplib_mark_qp_error(void *qp_handle); +static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw) +{ + /* Last index of the qp_tbl is for QP1, i.e. qp_tbl_size - 1 */ + return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % (rcfw->qp_tbl_size - 2); +} #endif /* __BNXT_QPLIB_RCFW_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 4cd475ea97a24e1064585e338bceedb22676cbca..64d44f51db4b6cbf0e080cdb74aa5d9832a63c02 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -149,7 +149,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_inline_data = le32_to_cpu(sb->max_inline_data); attr->l2_db_size = (sb->l2_db_space_size + 1) * (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); - attr->max_sgid = le32_to_cpu(sb->max_gid); + attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED; bnxt_qplib_query_version(rcfw, attr->fw_ver); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 6404f0da10517a5040be2f0965f9d3701d7bb104..967890cd81f271a5dec23af0818469146fd18b2e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -47,6 +47,7 @@ struct bnxt_qplib_dev_attr { #define FW_VER_ARR_LEN 4 u8 fw_ver[FW_VER_ARR_LEN]; +#define BNXT_QPLIB_NUM_GIDS_SUPPORTED 256 u16 max_sgid; u16 max_mrw; u32 max_qp; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 77bc02a9228eef385652601720d6f5f3cf31bbb8..1f288c73ccfc26db9a52d3b0d74eafc654f87127 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2885,7 +2885,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) case MORIBUND: case CLOSING: stop_ep_timer(ep); - /*FALLTHROUGH*/ + fallthrough; case FPDU_MODE: if (ep->com.qp && ep->com.qp->srq) { srqidx = ABORT_RSS_SRQIDX_G( @@ -3759,7 +3759,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, send_fw_act_open_req(ep, atid); return; } - /* fall through */ + fallthrough; case FW_EADDRINUSE: set_bit(ACT_RETRY_INUSE, &ep->com.history); if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index ac48012c992f1d24beeabc955ef4d84922c7e4e6..cbddb20c61216e2facea737301c3edb9e46e9f47 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1165,7 +1165,7 @@ int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, break; } fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE; - /*FALLTHROUGH*/ + fallthrough; case IB_WR_RDMA_WRITE: fw_opcode = FW_RI_RDMA_WRITE_WR; swsqe->opcode = FW_RI_RDMA_WRITE; diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c index b12e4665c9ab1be2c8d80ff9b64010b46747dbb8..4a4ec23978579bc31faa553eaf18140200621881 100644 --- a/drivers/infiniband/hw/hfi1/pio_copy.c +++ b/drivers/infiniband/hw/hfi1/pio_copy.c @@ -209,7 +209,6 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n) fallthrough; case 1: *dest++ = *src++; - /* fall through */ } } diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 9af82ff933d7335664dad97b46b0c4df36c6d0a9..73d197e2173053015f3a49d883520eb4e87f6992 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe) case 
IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: switch (prev->wr.opcode) { case IB_WR_TID_RDMA_WRITE: req = wqe_to_tid_req(prev); diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index da9888deff8c21c6745910232b0536f1bebf3804..6edcbdcd8f4323debe40b9fb200d74f6a4789060 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -65,8 +65,6 @@ #define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2 #define HNS_ROCE_MIN_CQE_CNT 16 -#define HNS_ROCE_RESERVED_SGE 1 - #define HNS_ROCE_MAX_IRQ_NUM 128 #define HNS_ROCE_SGE_IN_WQE 2 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 07b4c85d341db864139f8653582dd3f46822933c..aeb3a6fa7d472bf83f7164afeeb1154eb9c7ad5b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -535,7 +535,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept, roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val); dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep); - dev_dbg(dev, "ext SDB threshold: epmty: 0x%x, ful: 0x%x\n", + dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n", ext_sdb_alept, ext_sdb_alful); } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index d2968594664bbbf95b5217249c420ba595aff599..4cda95ed1fbe280edb620ebc61d45d0b9da50690 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -633,7 +633,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); - if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) { + if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n", wr->num_sge, hr_qp->rq.max_gs); ret = -EINVAL; @@ -653,7 +653,6 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, if (wr->num_sge < hr_qp->rq.max_gs) { dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); dseg->addr = 0; - dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH); } /* rq support inline data */ @@ -787,8 +786,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, } if (wr->num_sge < srq->max_gs) { - dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH); - dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); + dseg[i].len = 0; + dseg[i].lkey = cpu_to_le32(0x100); dseg[i].addr = 0; } @@ -5070,7 +5069,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) attr->srq_limit = limit_wl; attr->max_wr = srq->wqe_cnt - 1; - attr->max_sge = srq->max_gs - HNS_ROCE_RESERVED_SGE; + attr->max_sge = srq->max_gs; out: hns_roce_free_cmd_mailbox(hr_dev, mailbox); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 1fb1c583d0f8fbabf7dc3c0781684c50de50ecd9..ac29be43b6bd559a25cbee215d2df34bdacce6ab 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -92,9 +92,7 @@ #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 -#define HNS_ROCE_INVALID_LKEY 0x0 -#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000 - +#define HNS_ROCE_INVALID_LKEY 0x100 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 #define HNS_ROCE_V2_RSV_QPS 8 diff --git 
a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index e94ca130ff5eb7fbfdcfc969d6f5a63057b7094e..c063c450c715f482bd7428ef337f8243d700babd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -386,8 +386,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, return -EINVAL; } - hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) + - HNS_ROCE_RESERVED_SGE); + hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz); @@ -402,7 +401,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, hr_qp->rq_inl_buf.wqe_cnt = 0; cap->max_recv_wr = cnt; - cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE; + cap->max_recv_sge = hr_qp->rq.max_gs; return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index f40a000e94ee7e550fa095f7d4f58baf74b5a1b2..b9e2dbd372b66a973d2cf5b13464d933b72b53ee 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -297,7 +297,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, spin_lock_init(&srq->lock); srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1); - srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE; + srq->max_gs = init_attr->attr.max_sge; if (udata) { ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index fa7a5ff498c73bfd851030db4bfda3b010aca81b..a3b95805c154e66e7a359934a3e4983f37f1912d 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -2443,7 +2443,7 @@ static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node, case I40IW_CM_STATE_FIN_WAIT1: case I40IW_CM_STATE_LAST_ACK: cm_node->cm_id->rem_ref(cm_node->cm_id); - /* fall through */ + fallthrough; case I40IW_CM_STATE_TIME_WAIT: cm_node->state = I40IW_CM_STATE_CLOSED; i40iw_rem_ref_cm_node(cm_node); diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 688f196672215ee4b6c5384443218bb50cc48286..86d3f8aff329c12097125e8b27bf8abf76b056ef 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -1964,7 +1964,6 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq, info->out_rdrsp = true; break; case I40IW_AE_SOURCE_RSVD: - /* fallthrough */ default: break; } @@ -3762,14 +3761,14 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp, LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID))); set_64bit_val(wqe, 56, info->entry[2].data); - /* fallthrough */ + fallthrough; case 2: set_64bit_val(wqe, 32, (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) | LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID))); set_64bit_val(wqe, 40, info->entry[1].data); - /* fallthrough */ + fallthrough; case 1: set_64bit_val(wqe, 0, LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD)); diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c index ae8b97c3066575388c6d3381b0d76f03b5eb8910..e1085634b8d9df2820ee72268db8c14bff3ae4d6 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_hw.c +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c @@ -353,7 +353,6 @@ void i40iw_process_aeq(struct i40iw_device *iwdev) i40iw_cm_disconn(iwqp); break; case I40IW_AE_BAD_CLOSE: - /* 
fall through */ case I40IW_AE_RESET_SENT: i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0); i40iw_cm_disconn(iwqp); @@ -413,7 +412,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev) case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG: case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT: ctx_info->err_rq_idx_valid = false; - /* fall through */ + fallthrough; default: if (!info->sq && ctx_info->err_rq_idx_valid) { ctx_info->err_rq_idx = info->wqe_idx; diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 9c96ece5e7f33b48634a44c4d708f774704c4c9f..58a433135a0386ba6091e7bbee9935d9f6fb5e24 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -1489,36 +1489,35 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) iwdev->iw_status = 0; i40iw_port_ibevent(iwdev); i40iw_destroy_rdma_device(iwdev->iwibdev); - /* fallthrough */ + fallthrough; case IP_ADDR_REGISTERED: if (!iwdev->reset) i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); - /* fallthrough */ - /* fallthrough */ + fallthrough; case PBLE_CHUNK_MEM: i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); - /* fallthrough */ + fallthrough; case CEQ_CREATED: i40iw_dele_ceqs(iwdev); - /* fallthrough */ + fallthrough; case AEQ_CREATED: i40iw_destroy_aeq(iwdev); - /* fallthrough */ + fallthrough; case IEQ_CREATED: i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset); - /* fallthrough */ + fallthrough; case ILQ_CREATED: i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset); - /* fallthrough */ + fallthrough; case CCQ_CREATED: i40iw_destroy_ccq(iwdev); - /* fallthrough */ + fallthrough; case HMC_OBJS_CREATED: i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset); - /* fallthrough */ + fallthrough; case CQP_CREATED: i40iw_destroy_cqp(iwdev, true); - /* fallthrough */ + fallthrough; case INITIAL_STATE: i40iw_cleanup_cm_core(&iwdev->cm_core); if (iwdev->vsi.pestat) { @@ -1528,7 +1527,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) i40iw_del_init_mem(iwdev); break; case INVALID_STATE: - /* fallthrough */ default: i40iw_pr_err("bad init_state = %d\n", iwdev->init_state); break; diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index d9c7ae6a7030b46fdbbce064806ec31852dcbde6..924be4b03c9a0ee16d50787153c69b039870c0dc 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c @@ -814,13 +814,13 @@ void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi, switch (rsrc->completion) { case PUDA_HASH_CRC_COMPLETE: i40iw_free_hash_desc(rsrc->hash_desc); - /* fall through */ + fallthrough; case PUDA_QP_CREATED: if (!reset) i40iw_puda_free_qp(rsrc); i40iw_free_dma_mem(dev->hw, &rsrc->qpmem); - /* fallthrough */ + fallthrough; case PUDA_CQ_CREATED: if (!reset) i40iw_puda_free_cq(rsrc); diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 016524683e17eb8801a4f1da38dd8d860378c063..e07fb37af0865d60a1539d4a411be843f7f47ca2 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -190,9 +190,8 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, switch (event) { case NETDEV_DOWN: action = I40IW_ARP_DELETE; - /* Fall through */ + fallthrough; case NETDEV_UP: - /* Fall through */ case NETDEV_CHANGEADDR: /* Just skip if no need to handle ARP cache */ @@ -247,9 +246,8 @@ int i40iw_inet6addr_event(struct 
notifier_block *notifier, switch (event) { case NETDEV_DOWN: action = I40IW_ARP_DELETE; - /* Fall through */ + fallthrough; case NETDEV_UP: - /* Fall through */ case NETDEV_CHANGEADDR: i40iw_manage_arp_cache(iwdev, netdev->dev_addr, @@ -344,7 +342,7 @@ int i40iw_netdevice_event(struct notifier_block *notifier, switch (event) { case NETDEV_DOWN: iwdev->iw_status = 0; - /* Fall through */ + fallthrough; case NETDEV_UP: i40iw_port_ibevent(iwdev); break; diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 6957e4f3404b10e78df64f1623fe22f3d2c5f2c6..b51339328a51eff1f153362f4479c29c301309e9 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -810,7 +810,7 @@ void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp, case I40IW_QP_STATE_RTS: if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE) i40iw_send_reset(iwqp->cm_node); - /* fall through */ + fallthrough; case I40IW_QP_STATE_IDLE: case I40IW_QP_STATE_TERMINATE: case I40IW_QP_STATE_CLOSING: @@ -2144,7 +2144,6 @@ static int i40iw_post_send(struct ib_qp *ibqp, switch (ib_wr->opcode) { case IB_WR_SEND: - /* fall-through */ case IB_WR_SEND_WITH_INV: if (ib_wr->opcode == IB_WR_SEND) { if (ib_wr->send_flags & IB_SEND_SOLICITED) @@ -2201,7 +2200,7 @@ static int i40iw_post_send(struct ib_qp *ibqp, break; case IB_WR_RDMA_READ_WITH_INV: inv_stag = true; - /* fall-through*/ + fallthrough; case IB_WR_RDMA_READ: if (ib_wr->num_sge > I40IW_MAX_SGE_RD) { err = -EINVAL; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index f8b936b76dcdfd66fce2e3490ac8922c80f77897..8a3436994f809750be5352aab3adf9c180a35d3b 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -765,13 +765,13 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) { case MLX4_OPCODE_RDMA_WRITE_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX4_OPCODE_RDMA_WRITE: wc->opcode = IB_WC_RDMA_WRITE; break; case MLX4_OPCODE_SEND_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX4_OPCODE_SEND: case MLX4_OPCODE_SEND_INVAL: wc->opcode = IB_WC_SEND; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 5e7910a517daefefb91635b92d43e5cf81c4e821..bd4f975e7f9acaff5f940a1d36b63399fa904e33 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -784,7 +784,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->ip_gids = true; props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; props->max_msg_sz = mdev->dev->caps.max_msg_sz; - props->pkey_tbl_len = 1; + if (mdev->dev->caps.pkey_table_len[port]) + props->pkey_tbl_len = 1; props->max_mtu = IB_MTU_4096; props->max_vl_num = 2; props->state = IB_PORT_DOWN; diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c index d844831179cf7c200e7d9ce15493b63557ad0193..5e4ec9786081cf3a42dfe2f51532b3d6f4ee581b 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c @@ -944,7 +944,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, switch (sa_mad->mad_hdr.method) { case IB_MGMT_METHOD_SET: may_create = 1; - /* fall through */ + fallthrough; case IB_SA_METHOD_DELETE: req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 
f9ca6e000a8146e0404e794c7cbb7493c426f944..2975f350b9fd1076f2eafb3c2f0f3e5c423ab4d1 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1578,12 +1578,12 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, pd = to_mxrcd(init_attr->xrcd)->pd; xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; - /* fall through */ + fallthrough; case IB_QPT_XRC_INI: if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) return ERR_PTR(-ENOSYS); init_attr->recv_cq = init_attr->send_cq; - /* fall through */ + fallthrough; case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_RAW_PACKET: @@ -1592,7 +1592,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, return ERR_PTR(-ENOMEM); qp->pri.vid = 0xFFFF; qp->alt.vid = 0xFFFF; - /* fall through */ + fallthrough; case IB_QPT_UD: { err = create_qp_common(pd, init_attr, udata, 0, &qp); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 0133ebb8d740e855a7f4e6718a80b159f135f38e..dceb0eb2bed1621e5530a5d55a2d301b7e8e04af 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -121,13 +121,13 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { case MLX5_OPCODE_RDMA_WRITE_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX5_OPCODE_RDMA_WRITE: wc->opcode = IB_WC_RDMA_WRITE; break; case MLX5_OPCODE_SEND_IMM: wc->wc_flags |= IB_WC_WITH_IMM; - /* fall through */ + fallthrough; case MLX5_OPCODE_SEND: case MLX5_OPCODE_SEND_INVAL: wc->opcode = IB_WC_SEND; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 454ce5de2de7174437ba31b9733a19770367a075..9bb9bb058932f1b36fd441b3b96407af4ac0ccc0 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -250,9 +250,8 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, if (MLX5_CAP_GEN(dev->mdev, vport_counters) && method == IB_MGMT_METHOD_GET) return process_pma_cmd(dev, port_num, in, out); - /* fallthrough */ + fallthrough; case MLX5_IB_VENDOR_CLASS1: - /* fallthrough */ case MLX5_IB_VENDOR_CLASS2: case IB_MGMT_CLASS_CONG_MGMT: { if (method != IB_MGMT_METHOD_GET && diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index fbc45a5e76c5c6f6a55021938fcaa76f7e521c64..d60d63221b14d9224535f9d8dac42d1d5d7c288a 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2872,7 +2872,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work) break; case MLX5_EVENT_TYPE_GENERAL_EVENT: handle_general_event(ibdev, work->param, &ibev); - /* fall through */ + fallthrough; default: goto out; } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 59fce5fac7a3342ef4d3ae07a3a62440b1328380..5758dbe640451cbe0d0df216d20930d4f175081a 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -416,7 +416,7 @@ static int sq_overhead(struct ib_qp_init_attr *attr) switch (attr->qp_type) { case IB_QPT_XRC_INI: size += sizeof(struct mlx5_wqe_xrc_seg); - /* fall through */ + fallthrough; case IB_QPT_RC: size += sizeof(struct mlx5_wqe_ctrl_seg) + max(sizeof(struct mlx5_wqe_atomic_seg) + @@ -441,7 +441,7 @@ static int sq_overhead(struct ib_qp_init_attr *attr) if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) size += sizeof(struct mlx5_wqe_eth_pad) + sizeof(struct mlx5_wqe_eth_seg); - /* fall 
through */ + fallthrough; case IB_QPT_SMI: case MLX5_IB_QPT_HW_GSI: size += sizeof(struct mlx5_wqe_ctrl_seg) + diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c index 0823c0bc7e73a94861079f02ca4f3681a2ca9faa..f051f4e06b539c791c1d7211a7c5a5c95efe3a9d 100644 --- a/drivers/infiniband/hw/mthca/mthca_av.c +++ b/drivers/infiniband/hw/mthca/mthca_av.c @@ -115,7 +115,7 @@ static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate) switch ((cur_rate - 1) / req_rate) { case 0: return MTHCA_RATE_MEMFREE_FULL; case 1: return MTHCA_RATE_MEMFREE_HALF; - case 2: /* fall through */ + case 2: case 3: return MTHCA_RATE_MEMFREE_QUARTER; default: return MTHCA_RATE_MEMFREE_EIGHTH; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 6cdbec13756af5ac7741202fcf38cc54ed5d6dbe..c1751c9a0f625c92b97f60010417f3c8857b7172 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -2134,7 +2134,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, case IB_WR_SEND_WITH_IMM: hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); hdr->immdt = ntohl(wr->ex.imm_data); - /* fall through */ + fallthrough; case IB_WR_SEND: hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT); ocrdma_build_send(qp, hdr, wr); @@ -2148,7 +2148,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, case IB_WR_RDMA_WRITE_WITH_IMM: hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT); hdr->immdt = ntohl(wr->ex.imm_data); - /* fall through */ + fallthrough; case IB_WR_RDMA_WRITE: hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); status = ocrdma_build_write(qp, hdr, wr); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 4ce4e2eef6ccdb4ba12ce193197126483e807360..b49bef94637e5020dd2d5e64347ea306c80000a1 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3528,7 +3528,7 @@ static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, break; case IB_WR_RDMA_READ_WITH_INV: SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1); - /* fallthrough -- same is identical to RDMA READ */ + fallthrough; /* same is identical to RDMA READ */ case IB_WR_RDMA_READ: wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD; diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index ca5ea734e3d0465527aeaa42522b5575a745fd88..44150be215bf22d196a02cad5156b98b6614f3f0 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -2973,11 +2973,11 @@ static u32 qib_6120_iblink_state(u64 ibcs) state = IB_PORT_ARMED; break; case IB_6120_L_STATE_ACTIVE: - /* fall through */ case IB_6120_L_STATE_ACT_DEFER: state = IB_PORT_ACTIVE; break; - default: /* fall through */ + default: + fallthrough; case IB_6120_L_STATE_DOWN: state = IB_PORT_DOWN; break; diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index ea3ddb05cbadfec3c58ce32f252587388091af7a..0a6f26d4cb31004c9091118104483d17f4353d30 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -3586,11 +3586,11 @@ static u32 qib_7220_iblink_state(u64 ibcs) state = IB_PORT_ARMED; break; case IB_7220_L_STATE_ACTIVE: - /* fall through */ case IB_7220_L_STATE_ACT_DEFER: state = IB_PORT_ACTIVE; break; - default: /* fall through */ + default: + fallthrough; case IB_7220_L_STATE_DOWN: state = 
IB_PORT_DOWN; break; diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 8bcbc884e5b613a7e77b13be9ebae2f70cc9f354..a10eab89aee4992002210ef7d479837c8fcbe335 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -5508,11 +5508,11 @@ static u32 qib_7322_iblink_state(u64 ibcs) state = IB_PORT_ARMED; break; case IB_7322_L_STATE_ACTIVE: - /* fall through */ case IB_7322_L_STATE_ACT_DEFER: state = IB_PORT_ACTIVE; break; - default: /* fall through */ + default: + fallthrough; case IB_7322_L_STATE_DOWN: state = IB_PORT_DOWN; break; @@ -6533,7 +6533,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) "Invalid num_vls %u, using 4 VLs\n", qib_num_cfg_vls); qib_num_cfg_vls = 4; - /* fall through */ + fallthrough; case 4: ppd->vls_supported = IB_VL_VL0_3; break; diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 79bb83222e8d13f74dd097f566bf39045bf2f72a..e7789e724f561c4a4009844650e765be1ea53f49 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -433,7 +433,7 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) /* Bad mkey not a violation below level 2 */ if (ibp->rvp.mkeyprot < 2) break; - /* fall through */ + fallthrough; case IB_MGMT_METHOD_SET: case IB_MGMT_METHOD_TRAP_REPRESS: if (ibp->rvp.mkey_violations != 0xFFFF) @@ -828,7 +828,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, case IB_PORT_NOP: if (lstate == 0) break; - /* FALLTHROUGH */ + fallthrough; case IB_PORT_DOWN: if (lstate == 0) lstate = QIB_IB_LINKDOWN_ONLY; @@ -1928,7 +1928,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ret = IB_MAD_RESULT_SUCCESS; goto bail; } - /* FALLTHROUGH */ + fallthrough; default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); @@ -1962,7 +1962,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ret = IB_MAD_RESULT_SUCCESS; goto bail; } - /* FALLTHROUGH */ + fallthrough; default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); @@ -2322,7 +2322,7 @@ static int process_cc(struct ib_device *ibdev, int mad_flags, ret = cc_get_congestion_control_table(ccp, ibdev, port); goto bail; - /* FALLTHROUGH */ + fallthrough; default: ccp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) ccp); @@ -2339,7 +2339,7 @@ static int process_cc(struct ib_device *ibdev, int mad_flags, ret = cc_set_congestion_control_table(ccp, ibdev, port); goto bail; - /* FALLTHROUGH */ + fallthrough; default: ccp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) ccp); diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index aaf7438258fac80cb1bfa3c139fbc65f47037245..3915e5b4a9bc10a458cc7c1d9b4aae2fd5991d89 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -83,7 +83,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } - /* FALLTHROUGH */ + fallthrough; case OP(ATOMIC_ACKNOWLEDGE): /* * We can increment the tail pointer now that the last @@ -92,7 +92,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, */ if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) qp->s_tail_ack_queue = 0; - /* FALLTHROUGH */ + fallthrough; case OP(SEND_ONLY): case OP(ACKNOWLEDGE): /* Check for no next entry in the queue. 
*/ @@ -149,7 +149,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, case OP(RDMA_READ_RESPONSE_FIRST): qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_READ_RESPONSE_MIDDLE): qp->s_cur_sge = &qp->s_ack_rdma_sge; qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; @@ -471,10 +471,10 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) * See qib_restart_rc(). */ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); - /* FALLTHROUGH */ + fallthrough; case OP(SEND_FIRST): qp->s_state = OP(SEND_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; ss = &qp->s_sge; @@ -510,10 +510,10 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) * See qib_restart_rc(). */ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_FIRST): qp->s_state = OP(RDMA_WRITE_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; ss = &qp->s_sge; @@ -1807,7 +1807,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, if (!ret) goto rnr_nak; qp->r_rcv_len = 0; - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): case OP(RDMA_WRITE_MIDDLE): send_middle: @@ -1839,7 +1839,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, qp->r_rcv_len = 0; if (opcode == OP(SEND_ONLY)) goto no_immediate_data; - /* fall through -- for SEND_ONLY_WITH_IMMEDIATE */ + fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */ case OP(SEND_LAST_WITH_IMMEDIATE): send_last_imm: wc.ex.imm_data = ohdr->u.imm_data; diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c index 99e11c347130e7364d6b5f807988f29da00e9334..8f8d6173665682a45814802a01ef137d299a45cf 100644 --- a/drivers/infiniband/hw/qib/qib_sdma.c +++ b/drivers/infiniband/hw/qib/qib_sdma.c @@ -763,7 +763,7 @@ void __qib_sdma_process_event(struct qib_pportdata *ppd, * bringing the link up with traffic active on * 7220, e.g. */ ss->go_s99_running = 1; - /* fall through -- and start dma engine */ + fallthrough; /* and start dma engine */ case qib_sdma_event_e10_go_hw_start: /* This reference means the state machine is started */ sdma_get(&ppd->sdma_state); diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index e17b91e2c22a91022930dc6cbbc896500bf6663a..554af4273a131218d0b6b3a42fd2d5833d0de356 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -161,7 +161,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) case OP(SEND_FIRST): qp->s_state = OP(SEND_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): len = qp->s_len; if (len > pmtu) { @@ -185,7 +185,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) case OP(RDMA_WRITE_FIRST): qp->s_state = OP(RDMA_WRITE_MIDDLE); - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_MIDDLE): len = qp->s_len; if (len > pmtu) { @@ -351,7 +351,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr, goto no_immediate_data; else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE)) goto send_last_imm; - /* FALLTHROUGH */ + fallthrough; case OP(SEND_MIDDLE): /* Check for invalid length PMTU or posted rwqe len. 
*/ if (unlikely(tlen != (hdrsize + pmtu + 4))) @@ -440,7 +440,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr, wc.ex.imm_data = ohdr->u.rc.imm_data; goto rdma_last_imm; } - /* FALLTHROUGH */ + fallthrough; case OP(RDMA_WRITE_MIDDLE): /* Check for invalid length PMTU or posted rwqe len. */ if (unlikely(tlen != (hdrsize + pmtu + 4))) diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 7acf9ba5358a417b493b5ad3515d29efcda7202d..f6c01bad5a74f5315f07bce97d4e932550899390 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -237,7 +237,7 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, case IB_QPT_GSI: if (ib_qib_disable_sma) break; - /* FALLTHROUGH */ + fallthrough; case IB_QPT_UD: qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp); break; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index c9abe1c01e4eb4973e6647d7a77df9213e380536..662e7fc7f62835961b7cd427df59d60694c0e067 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -120,7 +120,7 @@ static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev) IB_QPS_ERR, NULL); if (status) { - usnic_err("Failed to transistion qp grp %u from %s to %s\n", + usnic_err("Failed to transition qp grp %u from %s to %s\n", qp_grp->grp_id, usnic_ib_qp_grp_state_to_string (cur_state), diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index afcc2abcf55c0680f148230e9bcd0598a579f706..9a8f2a9507be078057e57b577ffad6ea7216236f 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -238,7 +238,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, ret = -EINVAL; goto err_qp; } - /* fall through */ + fallthrough; case IB_QPT_RC: case IB_QPT_UD: qp = kzalloc(sizeof(*qp), GFP_KERNEL); diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 332a8ba94b81b0c5e00b8f6d569181c0b249a274..ee48befc8978619b477cfa7325d66be1f622773f 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -1111,7 +1111,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, if (init_attr->port_num == 0 || init_attr->port_num > ibpd->device->phys_port_cnt) return ERR_PTR(-EINVAL); - /* fall through */ + fallthrough; case IB_QPT_UC: case IB_QPT_RC: case IB_QPT_UD: diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 907203afbd99431cc900ae9d334d132f03666a46..77f2c7cd1216c5fff07a087f742dc6e749b7953d 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -40,6 +40,8 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib"); MODULE_DESCRIPTION("Soft RDMA transport"); MODULE_LICENSE("Dual BSD/GPL"); +bool rxe_initialized; + /* free resources for a rxe device all objects created for this device must * have been destroyed */ @@ -315,6 +317,7 @@ static int __init rxe_module_init(void) return err; rdma_link_register(&rxe_link_ops); + rxe_initialized = true; pr_info("loaded\n"); return 0; } @@ -326,6 +329,7 @@ static void __exit rxe_module_exit(void) rxe_net_exit(); rxe_cache_exit(); + rxe_initialized = false; pr_info("unloaded\n"); } diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index fb07eed9e4028345a02aac983269241012de2fae..cae1b0a24c850d694fd7ddbcf14dad98b689c972 100644 --- 
a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -67,6 +67,8 @@ #define RXE_ROCE_V2_SPORT (0xc000) +extern bool rxe_initialized; + static inline u32 rxe_crc32(struct rxe_dev *rxe, u32 crc, void *next, size_t len) { diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index 4bc88708b355885b3f429e75af0dc3cb1975cd90..7b4df0028388cb82278b65274ff6b50abde52c76 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -282,7 +282,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, if ((syn & AETH_TYPE_MASK) != AETH_ACK) return COMPST_ERROR; - /* fall through */ + fallthrough; /* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an AETH) */ case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE: diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index cdd811a45120f4e5a5b274895c84d6759f200a8e..ce24144de16a96fd33f8fba7ebe7950bbdda341f 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -205,6 +205,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start, vaddr = page_address(sg_page_iter_page(&sg_iter)); if (!vaddr) { pr_warn("null vaddr\n"); + ib_umem_release(umem); err = -ENOMEM; goto err1; } diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c index ccda5f5a3bc0aad81915936e2c1d4e349de60e86..2af31d421bfc3a79c15e5845924978f405f5620e 100644 --- a/drivers/infiniband/sw/rxe/rxe_sysfs.c +++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c @@ -61,6 +61,11 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp) struct net_device *ndev; struct rxe_dev *exists; + if (!rxe_initialized) { + pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n"); + return -EAGAIN; + } + len = sanitize_arg(val, intf, sizeof(intf)); if (!len) { pr_err("add: invalid interface name\n"); diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c index 08f05ac5f5d526f94d48f5b68ae9512463c4bd15..ecdac3f8fcc9f2137624e25f5aba7e418368a7e9 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.c +++ b/drivers/infiniband/sw/rxe/rxe_task.c @@ -71,7 +71,7 @@ void rxe_do_task(unsigned long data) case TASK_STATE_BUSY: task->state = TASK_STATE_ARMED; - /* fall through */ + fallthrough; case TASK_STATE_ARMED: spin_unlock_irqrestore(&task->state_lock, flags); return; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index bb61e534e468221ae7413c7833a0492d31bdfcaf..8522e9a3e9140b509605f1751d6dfa20cfbe3878 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -540,7 +540,7 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, switch (wr->opcode) { case IB_WR_RDMA_WRITE_WITH_IMM: wr->ex.imm_data = ibwr->ex.imm_data; - /* fall through */ + fallthrough; case IB_WR_RDMA_READ: case IB_WR_RDMA_WRITE: wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr; @@ -1056,7 +1056,7 @@ static ssize_t parent_show(struct device *device, struct rxe_dev *rxe = rdma_device_to_drv_device(device, struct rxe_dev, ib_dev); - return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1)); + return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1)); } static DEVICE_ATTR_RO(parent); diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index 1662216be66df2614ffd0ae26f8ec480218f8b3f..66764f7ef072adc730d0c76e57968af03a66634a 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c 
+++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -1224,12 +1224,10 @@ static void siw_cm_llp_data_ready(struct sock *sk) switch (cep->state) { case SIW_EPSTATE_RDMA_MODE: - /* fall through */ case SIW_EPSTATE_LISTENING: break; case SIW_EPSTATE_AWAIT_MPAREQ: - /* fall through */ case SIW_EPSTATE_AWAIT_MPAREP: siw_cm_queue_work(cep, SIW_CM_WORK_READ_MPAHDR); break; diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 857be5a7d0bddc71de7ec2f2fd9f4db492455086..4bd1f1f84057bd1977af05bb48c5077d0c865aef 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -1215,7 +1215,7 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error) case RDMAP_SEND_SE: case RDMAP_SEND_SE_INVAL: wqe->rqe.flags |= SIW_WQE_SOLICITED; - /* Fall through */ + fallthrough; case RDMAP_SEND: case RDMAP_SEND_INVAL: @@ -1386,7 +1386,7 @@ int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb, * DDP segment. */ qp->rx_fpdu->first_ddp_seg = 0; - /* Fall through */ + fallthrough; case SIW_GET_DATA_START: /* diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 9f53aa4feb8782c0880120da0e26fe545e24f46e..d19d8325588b5cee9f3574a291432962e1551ad5 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -1042,7 +1042,7 @@ int siw_qp_sq_process(struct siw_qp *qp) case SIW_OP_SEND_REMOTE_INV: case SIW_OP_WRITE: siw_wqe_put_mem(wqe, tx_type); - /* Fall through */ + fallthrough; case SIW_OP_INVAL_STAG: case SIW_OP_REG_MR: @@ -1128,7 +1128,7 @@ int siw_qp_sq_process(struct siw_qp *qp) case SIW_OP_READ: case SIW_OP_READ_LOCAL_INV: siw_wqe_put_mem(wqe, tx_type); - /* Fall through */ + fallthrough; case SIW_OP_INVAL_STAG: case SIW_OP_REG_MR: diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 9bf0fa30df28c9f76480bfcdf2b20846fe409864..7c41fb040f7c6fb5ae6ee4c8b0496e705f6e8916 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -512,13 +512,13 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id, return ipoib_cm_req_handler(cm_id, event); case IB_CM_DREQ_RECEIVED: ib_send_cm_drep(cm_id, NULL, 0); - /* Fall through */ + fallthrough; case IB_CM_REJ_RECEIVED: p = cm_id->context; priv = ipoib_priv(p->dev); if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE)) ipoib_warn(priv, "unable to move qp to error state\n"); - /* Fall through */ + fallthrough; default: return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 752581a8627bd07a2e10c477337252f8a27496d5..ab75b7f745d41573e3c4ea2dc6e5531e548aa667 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -502,7 +502,7 @@ static struct net_device *ipoib_get_net_dev_by_params( default: dev_warn_ratelimited(&dev->dev, "duplicate IP address detected\n"); - /* Fall through */ + fallthrough; case 1: return net_dev; } diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 699e075ae1b366e69ef39bd44c21893ba01c162c..2f3ebc0a75d92466456ab8948b50e723ab45f870 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -711,7 +711,7 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve case RDMA_CM_EVENT_REJECTED: iser_info("Connection rejected: %s\n", rdma_reject_msg(cma_id, event->status)); - /* 
FALLTHROUGH */ + fallthrough; case RDMA_CM_EVENT_ADDR_ERROR: case RDMA_CM_EVENT_ROUTE_ERROR: case RDMA_CM_EVENT_CONNECT_ERROR: diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 61e2f7fc513d060a4386fb6f7bd11e476a9d9889..695f701dc43d9016b91331bd518c36721965dd83 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -140,15 +140,15 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn) rx_desc = isert_conn->rx_descs; for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { - dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf, + ISER_RX_SIZE, DMA_FROM_DEVICE); if (ib_dma_mapping_error(ib_dev, dma_addr)) goto dma_map_fail; rx_desc->dma_addr = dma_addr; rx_sg = &rx_desc->rx_sg; - rx_sg->addr = rx_desc->dma_addr; + rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc); rx_sg->length = ISER_RX_PAYLOAD_SIZE; rx_sg->lkey = device->pd->local_dma_lkey; rx_desc->rx_cqe.done = isert_recv_done; @@ -160,7 +160,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn) rx_desc = isert_conn->rx_descs; for (j = 0; j < i; j++, rx_desc++) { ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); } kfree(isert_conn->rx_descs); isert_conn->rx_descs = NULL; @@ -181,7 +181,7 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn) rx_desc = isert_conn->rx_descs; for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); } kfree(isert_conn->rx_descs); @@ -299,10 +299,9 @@ isert_free_login_buf(struct isert_conn *isert_conn) ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); kfree(isert_conn->login_rsp_buf); - ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, - DMA_FROM_DEVICE); - kfree(isert_conn->login_req_buf); + ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); + kfree(isert_conn->login_desc); } static int @@ -311,25 +310,25 @@ isert_alloc_login_buf(struct isert_conn *isert_conn, { int ret; - isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf), + isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc), GFP_KERNEL); - if (!isert_conn->login_req_buf) + if (!isert_conn->login_desc) return -ENOMEM; - isert_conn->login_req_dma = ib_dma_map_single(ib_dev, - isert_conn->login_req_buf, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); - ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); + isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev, + isert_conn->login_desc->buf, + ISER_RX_SIZE, DMA_FROM_DEVICE); + ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr); if (ret) { - isert_err("login_req_dma mapping error: %d\n", ret); - isert_conn->login_req_dma = 0; - goto out_free_login_req_buf; + isert_err("login_desc dma mapping error: %d\n", ret); + isert_conn->login_desc->dma_addr = 0; + goto out_free_login_desc; } isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); if (!isert_conn->login_rsp_buf) { ret = -ENOMEM; - goto out_unmap_login_req_buf; + goto out_unmap_login_desc; } isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, @@ -346,11 +345,11 @@ isert_alloc_login_buf(struct isert_conn *isert_conn, out_free_login_rsp_buf: kfree(isert_conn->login_rsp_buf); -out_unmap_login_req_buf: - 
ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); -out_free_login_req_buf: - kfree(isert_conn->login_req_buf); +out_unmap_login_desc: + ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); +out_free_login_desc: + kfree(isert_conn->login_desc); return ret; } @@ -476,7 +475,7 @@ isert_connect_release(struct isert_conn *isert_conn) if (isert_conn->qp) isert_destroy_qp(isert_conn); - if (isert_conn->login_req_buf) + if (isert_conn->login_desc) isert_free_login_buf(isert_conn); isert_device_put(device); @@ -664,8 +663,8 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) case RDMA_CM_EVENT_ESTABLISHED: isert_connected_handler(cma_id); break; - case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ - case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ + case RDMA_CM_EVENT_ADDR_CHANGE: + case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ ret = isert_disconnected_handler(cma_id, event->event); break; @@ -684,7 +683,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) case RDMA_CM_EVENT_REJECTED: isert_info("Connection rejected: %s\n", rdma_reject_msg(cma_id, event->status)); - /* fall through */ + fallthrough; case RDMA_CM_EVENT_UNREACHABLE: case RDMA_CM_EVENT_CONNECT_ERROR: ret = isert_connect_error(cma_id); @@ -862,17 +861,18 @@ isert_login_post_recv(struct isert_conn *isert_conn) int ret; memset(&sge, 0, sizeof(struct ib_sge)); - sge.addr = isert_conn->login_req_dma; + sge.addr = isert_conn->login_desc->dma_addr + + isert_get_hdr_offset(isert_conn->login_desc); sge.length = ISER_RX_PAYLOAD_SIZE; sge.lkey = isert_conn->device->pd->local_dma_lkey; isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", sge.addr, sge.length, sge.lkey); - isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done; + isert_conn->login_desc->rx_cqe.done = isert_login_recv_done; memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); - rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe; + rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe; rx_wr.sg_list = &sge; rx_wr.num_sge = 1; @@ -949,7 +949,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, static void isert_rx_login_req(struct isert_conn *isert_conn) { - struct iser_rx_desc *rx_desc = isert_conn->login_req_buf; + struct iser_rx_desc *rx_desc = isert_conn->login_desc; int rx_buflen = isert_conn->login_req_len; struct iscsi_conn *conn = isert_conn->conn; struct iscsi_login *login = conn->conn_login; @@ -961,7 +961,7 @@ isert_rx_login_req(struct isert_conn *isert_conn) if (login->first_request) { struct iscsi_login_req *login_req = - (struct iscsi_login_req *)&rx_desc->iscsi_header; + (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc); /* * Setup the initial iscsi_login values from the leading * login request PDU. 
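[Editor's note: a minimal sketch, not part of the patch. The reworked isert_alloc_login_buf() above follows the kernel's goto-based unwind idiom: each failure path jumps to a label that releases only the resources acquired before it, so the function never leaks the first buffer when a later allocation or mapping fails. A user-space illustration of the same pattern, with all names invented for this note:

#include <stdlib.h>

struct login_bufs { void *desc; void *rsp; };

static int alloc_login_bufs(struct login_bufs *b)
{
	b->desc = calloc(1, 64);	/* first resource */
	if (!b->desc)
		return -1;

	b->rsp = calloc(1, 64);		/* second resource */
	if (!b->rsp)
		goto out_free_desc;	/* undo only what already succeeded */

	return 0;

out_free_desc:
	free(b->desc);
	b->desc = NULL;
	return -1;
}

Unwind labels are ordered in reverse of acquisition, which is why the patch renames them to out_unmap_login_desc/out_free_login_desc as the managed resources change.]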
@@ -980,13 +980,13 @@ isert_rx_login_req(struct isert_conn *isert_conn) login->tsih = be16_to_cpu(login_req->tsih); } - memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); + memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN); size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); isert_dbg("Using login payload size: %d, rx_buflen: %d " "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, MAX_KEY_VALUE_PAIRS); - memcpy(login->req_buf, &rx_desc->data[0], size); + memcpy(login->req_buf, isert_get_data(rx_desc), size); if (login->first_request) { complete(&isert_conn->login_comp); @@ -1051,14 +1051,15 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, if (imm_data_len != data_len) { sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, - &rx_desc->data[0], imm_data_len); + isert_get_data(rx_desc), imm_data_len); isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", sg_nents, imm_data_len); } else { sg_init_table(&isert_cmd->sg, 1); cmd->se_cmd.t_data_sg = &isert_cmd->sg; cmd->se_cmd.t_data_nents = 1; - sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); + sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc), + imm_data_len); isert_dbg("Transfer Immediate imm_data_len: %d\n", imm_data_len); } @@ -1127,9 +1128,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, } isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " "sg_nents: %u from %p %u\n", sg_start, sg_off, - sg_nents, &rx_desc->data[0], unsol_data_len); + sg_nents, isert_get_data(rx_desc), unsol_data_len); - sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], + sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc), unsol_data_len); rc = iscsit_check_dataout_payload(cmd, hdr, false); @@ -1188,7 +1189,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd } cmd->text_in_ptr = text_in; - memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length); + memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length); return iscsit_process_text_cmd(conn, cmd, hdr); } @@ -1198,7 +1199,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, uint32_t read_stag, uint64_t read_va, uint32_t write_stag, uint64_t write_va) { - struct iscsi_hdr *hdr = &rx_desc->iscsi_header; + struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); struct iscsi_conn *conn = isert_conn->conn; struct iscsi_cmd *cmd; struct isert_cmd *isert_cmd; @@ -1296,8 +1297,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) struct isert_conn *isert_conn = wc->qp->qp_context; struct ib_device *ib_dev = isert_conn->cm_id->device; struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); - struct iscsi_hdr *hdr = &rx_desc->iscsi_header; - struct iser_ctrl *iser_ctrl = &rx_desc->iser_header; + struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); + struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc); uint64_t read_va = 0, write_va = 0; uint32_t read_stag = 0, write_stag = 0; @@ -1311,7 +1312,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) rx_desc->in_use = true; ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, @@ -1346,7 +1347,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) read_stag, read_va, write_stag, write_va); ib_dma_sync_single_for_device(ib_dev, 
rx_desc->dma_addr, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ISER_RX_SIZE, DMA_FROM_DEVICE); } static void @@ -1360,8 +1361,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) return; } - ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; @@ -1376,8 +1377,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) complete(&isert_conn->login_req_comp); mutex_unlock(&isert_conn->mutex); - ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma, - ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr, + ISER_RX_SIZE, DMA_FROM_DEVICE); } static void @@ -1470,7 +1471,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) transport_generic_free_cmd(&cmd->se_cmd, 0); break; } - /* fall through */ + fallthrough; default: iscsit_release_cmd(cmd); break; @@ -1648,7 +1649,7 @@ isert_do_control_comp(struct work_struct *work) switch (cmd->i_state) { case ISTATE_SEND_TASKMGTRSP: iscsit_tmr_post_handler(cmd, cmd->conn); - /* fall through */ + fallthrough; case ISTATE_SEND_REJECT: case ISTATE_SEND_TEXTRSP: cmd->i_state = ISTATE_SENT_STATUS; diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index c55f7d9bfced60cb51e67201fb74d5770b9f681d..7fee4a65e181abe61a387adcaa8342f4cc10c4d0 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -59,9 +59,11 @@ ISERT_MAX_TX_MISC_PDUS + \ ISERT_MAX_RX_MISC_PDUS) -#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \ - (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \ - sizeof(struct ib_cqe) + sizeof(bool))) +/* + * RX size is default of 8k plus headers, but data needs to align to + * 512 boundary, so use 1024 to have the extra space for alignment. 
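[Editor's note: a minimal sketch, not part of the patch. The comment above explains why ISER_RX_SIZE adds 1024 bytes of slack: the helpers introduced below place the iSER/iSCSI headers immediately before a data area that must start on a 512-byte boundary. The arithmetic, with an invented HDRS value standing in for ISER_HEADERS_LEN:

#include <stdint.h>
#include <stdio.h>

#define HDRS 28	/* stand-in; the real ISER_HEADERS_LEN differs */

/* Round p up to the next multiple of a (a must be a power of two). */
static uintptr_t align_up(uintptr_t p, uintptr_t a)
{
	return (p + a - 1) & ~(a - 1);
}

int main(void)
{
	static char buf[8192 + 1024];	/* payload plus slack, like ISER_RX_SIZE */
	/* Data starts 512-aligned; the headers sit immediately before it. */
	uintptr_t data = align_up((uintptr_t)buf + HDRS, 512);
	uintptr_t hdr = data - HDRS;

	printf("hdr offset %lu, data 512-aligned: %d\n",
	       (unsigned long)(hdr - (uintptr_t)buf), (data & 511) == 0);
	return 0;
}

The alignment shift is at most 511 bytes, so 1024 bytes of slack always leaves room for the worst-case shift plus the headers.]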
+ */ +#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024) /* Maximum support is 16MB I/O size */ #define ISCSI_ISER_MAX_SG_TABLESIZE 4096 @@ -81,21 +83,41 @@ enum iser_conn_state { }; struct iser_rx_desc { - struct iser_ctrl iser_header; - struct iscsi_hdr iscsi_header; - char data[ISCSI_DEF_MAX_RECV_SEG_LEN]; + char buf[ISER_RX_SIZE]; u64 dma_addr; struct ib_sge rx_sg; struct ib_cqe rx_cqe; bool in_use; - char pad[ISER_RX_PAD_SIZE]; -} __packed; +}; static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe) { return container_of(cqe, struct iser_rx_desc, rx_cqe); } +static void *isert_get_iser_hdr(struct iser_rx_desc *desc) +{ + return PTR_ALIGN(desc->buf + ISER_HEADERS_LEN, 512) - ISER_HEADERS_LEN; +} + +static size_t isert_get_hdr_offset(struct iser_rx_desc *desc) +{ + return isert_get_iser_hdr(desc) - (void *)desc->buf; +} + +static void *isert_get_iscsi_hdr(struct iser_rx_desc *desc) +{ + return isert_get_iser_hdr(desc) + sizeof(struct iser_ctrl); +} + +static void *isert_get_data(struct iser_rx_desc *desc) +{ + void *data = isert_get_iser_hdr(desc) + ISER_HEADERS_LEN; + + WARN_ON((uintptr_t)data & 511); + return data; +} + struct iser_tx_desc { struct iser_ctrl iser_header; struct iscsi_hdr iscsi_header; @@ -142,9 +164,8 @@ struct isert_conn { u32 responder_resources; u32 initiator_depth; bool pi_support; - struct iser_rx_desc *login_req_buf; + struct iser_rx_desc *login_desc; char *login_rsp_buf; - u64 login_req_dma; int login_req_len; u64 login_rsp_dma; struct iser_rx_desc *rx_descs; diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c index 874a8eb7638c217d79c43d078dc3e7f727401dfa..4933085a864a250bb6c870516cbc579420c253a8 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c @@ -547,7 +547,6 @@ static void vema_get(struct opa_vnic_vema_port *port, vema_get_mac_entries(port, recvd_mad, rsp_mad); break; case OPA_EM_ATTR_IFACE_UCAST_MACS: - /* fall through */ case OPA_EM_ATTR_IFACE_MCAST_MACS: vema_get_mac_list(port, recvd_mad, rsp_mad, attr_id); break; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c index 3d7877534bcc9d9cc33ab6f6605598ee3435d07b..cf6a2be61695dc56b8a3be5e54ea7016800dc875 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c @@ -152,13 +152,6 @@ static struct attribute_group rtrs_srv_stats_attr_group = { .attrs = rtrs_srv_stats_attrs, }; -static void rtrs_srv_dev_release(struct device *dev) -{ - struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev); - - kfree(srv); -} - static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess) { struct rtrs_srv *srv = sess->srv; @@ -172,7 +165,6 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess) goto unlock; } srv->dev.class = rtrs_dev_class; - srv->dev.release = rtrs_srv_dev_release; err = dev_set_name(&srv->dev, "%s", sess->s.sessname); if (err) goto unlock; @@ -182,16 +174,16 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess) * sysfs files are created */ dev_set_uevent_suppress(&srv->dev, true); - err = device_register(&srv->dev); + err = device_add(&srv->dev); if (err) { - pr_err("device_register(): %d\n", err); + pr_err("device_add(): %d\n", err); goto put; } srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj); if (!srv->kobj_paths) { err = -ENOMEM; pr_err("kobject_create_and_add(): 
%d\n", err); - device_unregister(&srv->dev); + device_del(&srv->dev); goto unlock; } dev_set_uevent_suppress(&srv->dev, false); @@ -216,7 +208,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess) kobject_del(srv->kobj_paths); kobject_put(srv->kobj_paths); mutex_unlock(&srv->paths_mutex); - device_unregister(&srv->dev); + device_del(&srv->dev); } else { mutex_unlock(&srv->paths_mutex); } diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c index a219bd1bdbc26525b3144356c1bc76080e67a273..28f6414dfa3dc444856590463123a1242338e534 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c @@ -1319,6 +1319,13 @@ static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess) return sess->cur_cq_vector; } +static void rtrs_srv_dev_release(struct device *dev) +{ + struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev); + + kfree(srv); +} + static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx, const uuid_t *paths_uuid) { @@ -1336,6 +1343,8 @@ static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx, uuid_copy(&srv->paths_uuid, paths_uuid); srv->queue_depth = sess_queue_depth; srv->ctx = ctx; + device_initialize(&srv->dev); + srv->dev.release = rtrs_srv_dev_release; srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks), GFP_KERNEL); diff --git a/drivers/input/joystick/fsia6b.c b/drivers/input/joystick/fsia6b.c index e78c4c768990934d979b21d1e73c1efc399e1eb4..76ffdec5c183f1d4fcd2635134143e6c19c2e4a6 100644 --- a/drivers/input/joystick/fsia6b.c +++ b/drivers/input/joystick/fsia6b.c @@ -102,12 +102,12 @@ static irqreturn_t fsia6b_serio_irq(struct serio *serio, input_report_key(fsia6b->dev, sw_id++, sw_state == 0); - /* fall-through */ + fallthrough; case '2': input_report_key(fsia6b->dev, sw_id++, sw_state == 1); - /* fall-through */ + fallthrough; case '1': input_report_key(fsia6b->dev, sw_id++, diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c index 88df68cc4ac656f8166f57a327119703f2ea0190..d37645e496ff17838d443699d549e42ec0f5d4c7 100644 --- a/drivers/input/joystick/gamecon.c +++ b/drivers/input/joystick/gamecon.c @@ -885,7 +885,6 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type) case GC_MULTI: input_set_capability(input_dev, EV_KEY, BTN_TRIGGER); - /* fall through */ break; case GC_PSX: diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c index 959c1d82aa662cd5db70857fc8feefc8efb57708..1cedb45ba49702e03bb94106f67ff540fab62326 100644 --- a/drivers/input/tablet/wacom_serial4.c +++ b/drivers/input/tablet/wacom_serial4.c @@ -213,7 +213,7 @@ static void wacom_handle_model_response(struct wacom *wacom) case 0x3731: /* PL-710 */ wacom->res_x = 2540; wacom->res_y = 2540; - /* fall through */ + fallthrough; case 0x3535: /* PL-550 */ case 0x3830: /* PL-800 */ wacom->extra_z_bits = 2; diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 6b71b0aff1152f0f50c4bad8a8b6343ccfbc3c0b..98f17fa3a8926e2e02d7e9c0bba3090291905cec 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -477,7 +477,7 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry) bootloader = appmode - 0x24; break; } - /* Fall through - for normal case */ + fallthrough; /* for normal case */ case 0x4c: case 0x4d: case 0x5a: diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c index 
607d1aeb595d7fcf765725bd20508bf56529aad0..bb1699e0d3c78260c4b3807b550df6b907c815ff 100644 --- a/drivers/input/touchscreen/wm831x-ts.c +++ b/drivers/input/touchscreen/wm831x-ts.c @@ -290,7 +290,7 @@ static int wm831x_ts_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "Unsupported ISEL setting: %d\n", pdata->isel); - /* Fall through */ + fallthrough; case 200: case 0: wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2, diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index befd111049c0e36748c326711df5741ab5b2be7e..cf07491b7415be7363f253d28259e60f8a834735 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -55,12 +55,18 @@ static int icc_summary_show(struct seq_file *s, void *data) icc_summary_show_one(s, n); hlist_for_each_entry(r, &n->req_list, req_node) { + u32 avg_bw = 0, peak_bw = 0; + if (!r->dev) continue; + if (r->enabled) { + avg_bw = r->avg_bw; + peak_bw = r->peak_bw; + } + seq_printf(s, " %-27s %12u %12u %12u\n", - dev_name(r->dev), r->tag, r->avg_bw, - r->peak_bw); + dev_name(r->dev), r->tag, avg_bw, peak_bw); } } } diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c index a3d2ef1d9903746e67768332872ac09376ba1b0b..609db9c95fd7a1c37c50ffd3bf25e1c2956c6a8c 100644 --- a/drivers/interconnect/qcom/bcm-voter.c +++ b/drivers/interconnect/qcom/bcm-voter.c @@ -52,8 +52,20 @@ static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b) return 1; } +static u64 bcm_div(u64 num, u32 base) +{ + /* Ensure that small votes aren't lost. */ + if (num && num < base) + return 1; + + do_div(num, base); + + return num; +} + static void bcm_aggregate(struct qcom_icc_bcm *bcm) { + struct qcom_icc_node *node; size_t i, bucket; u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0}; u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0}; @@ -61,22 +73,21 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm) for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { for (i = 0; i < bcm->num_nodes; i++) { - temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width; - do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels); + node = bcm->nodes[i]; + temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width, + node->buswidth * node->channels); agg_avg[bucket] = max(agg_avg[bucket], temp); - temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width; - do_div(temp, bcm->nodes[i]->buswidth); + temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width, + node->buswidth); agg_peak[bucket] = max(agg_peak[bucket], temp); } temp = agg_avg[bucket] * 1000ULL; - do_div(temp, bcm->aux_data.unit); - bcm->vote_x[bucket] = temp; + bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit); temp = agg_peak[bucket] * 1000ULL; - do_div(temp, bcm->aux_data.unit); - bcm->vote_y[bucket] = temp; + bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit); } if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 && diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig index 1f061d91e0b84972352418b131e92656c72ffb8e..626b97d0dd21a9fd62013841ed343acaff27171d 100644 --- a/drivers/iommu/amd/Kconfig +++ b/drivers/iommu/amd/Kconfig @@ -10,7 +10,7 @@ config AMD_IOMMU select IOMMU_API select IOMMU_IOVA select IOMMU_DMA - depends on X86_64 && PCI && ACPI + depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE help With this option you can enable support for AMD IOMMU hardware in your system. 
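[Editor's note: a minimal sketch, not part of the patch. The bcm_div() helper added in the bcm-voter hunk above encodes one rule: a small but nonzero bandwidth vote must not round down to zero during scaling, or the request would be dropped entirely. A user-space rendering (the kernel uses do_div() instead of plain division):

#include <stdint.h>
#include <stdio.h>

static uint64_t bcm_div(uint64_t num, uint32_t base)
{
	if (num && num < base)
		return 1;	/* keep small votes alive */

	return num / base;	/* do_div(num, base) in the kernel */
}

int main(void)
{
	printf("%llu %llu\n",
	       (unsigned long long)bcm_div(3, 16),	/* 1, not 0 */
	       (unsigned long long)bcm_div(64, 16));	/* 4 */
	return 0;
}

]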
An IOMMU is a hardware component which provides diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 958050c213f98849239efac787bc329269d8cea8..445a08d23fedc2b9b737db9d2dd34eb1c0817884 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -1511,7 +1511,14 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_REG_END_OFFSET; else iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; - if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) + + /* + * Note: GA (128-bit IRTE) mode requires cmpxchg16b support. + * GAM also requires GA mode. Therefore, we need to + * check cmpxchg16b support before enabling it. + */ + if (!boot_cpu_has(X86_FEATURE_CX16) || + ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; break; case 0x11: @@ -1520,8 +1527,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_REG_END_OFFSET; else iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; + + /* + * Note: GA (128-bit IRTE) mode requires cmpxchg16b support. + * XT and GAM also require GA mode. Therefore, we need to + * check cmpxchg16b support before enabling them. + */ - if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) + if (!boot_cpu_has(X86_FEATURE_CX16) || + ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) { amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; + break; + } + /* * Note: Since iommu_update_intcapxt() leverages * the IOMMU MMIO access to MSI capability block registers @@ -2258,7 +2275,7 @@ static void iommu_enable_ga(struct amd_iommu *iommu) switch (amd_iommu_guest_ir) { case AMD_IOMMU_GUEST_IR_VAPIC: iommu_feature_enable(iommu, CONTROL_GAM_EN); - /* Fall through */ + fallthrough; case AMD_IOMMU_GUEST_IR_LEGACY_GA: iommu_feature_enable(iommu, CONTROL_GA_EN); iommu->irte_ops = &irte_128_ops; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index ba9f3dbc5b94796adb199006390bdbcad8741963..07ae8b93887e564b31afc30e15b6d12972da56ee 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2659,7 +2659,12 @@ static int amd_iommu_def_domain_type(struct device *dev) if (!dev_data) return 0; - if (dev_data->iommu_v2) + /* + * Do not identity map IOMMUv2 capable devices when memory encryption is + * active, because some of those devices (AMD GPUs) don't have the + * encryption bit in their DMA-mask and require remapping. + */ + if (!mem_encrypt_active() && dev_data->iommu_v2) return IOMMU_DOMAIN_IDENTITY; return 0; @@ -3292,6 +3297,7 @@ static int alloc_irq_index(u16 devid, int count, bool align, static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte, struct amd_ir_data *data) { + bool ret; struct irq_remap_table *table; struct amd_iommu *iommu; unsigned long flags; @@ -3309,10 +3315,18 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte, entry = (struct irte_ga *)table->table; entry = &entry[index]; - entry->lo.fields_remap.valid = 0; - entry->hi.val = irte->hi.val; - entry->lo.val = irte->lo.val; - entry->lo.fields_remap.valid = 1; + + ret = cmpxchg_double(&entry->lo.val, &entry->hi.val, + entry->lo.val, entry->hi.val, + irte->lo.val, irte->hi.val); + /* + * We use cmpxchg16 to atomically update the 128-bit IRTE, + * and it cannot be updated by the hardware or other processors + * behind us, so the return value of cmpxchg16 should be the + * same as the old value. 
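[Editor's note: a user-space analogue, not the kernel API. The cmpxchg_double() call above updates both 64-bit halves of the IRTE in one 128-bit compare-and-exchange (cmpxchg16b on x86-64), so the hardware can never observe a half-written entry; that is also why the Kconfig hunk above adds a HAVE_CMPXCHG_DOUBLE dependency and init.c now checks X86_FEATURE_CX16. An equivalent shape with GCC/Clang builtins (may need -mcx16 and/or -latomic):

#include <stdbool.h>
#include <stdio.h>

static bool update_irte128(unsigned __int128 *entry,
			   unsigned __int128 old, unsigned __int128 new)
{
	/* Replace *entry with new only if it still holds old. */
	return __atomic_compare_exchange_n(entry, &old, new, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	unsigned __int128 irte = 1;

	printf("%d\n", update_irte128(&irte, 1, 2));	/* 1: nobody raced us */
	printf("%d\n", update_irte128(&irte, 1, 3));	/* 0: expected value stale */
	return 0;
}

]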
+ */ + WARN_ON(!ret); + if (data) data->ref = entry; @@ -3850,6 +3864,7 @@ int amd_iommu_deactivate_guest_mode(void *data) struct amd_ir_data *ir_data = (struct amd_ir_data *)data; struct irte_ga *entry = (struct irte_ga *) ir_data->entry; struct irq_cfg *cfg = ir_data->cfg; + u64 valid = entry->lo.fields_remap.valid; if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry || !entry->lo.fields_vapic.guest_mode) @@ -3858,6 +3873,7 @@ int amd_iommu_deactivate_guest_mode(void *data) entry->lo.val = 0; entry->hi.val = 0; + entry->lo.fields_remap.valid = valid; entry->lo.fields_remap.dm = apic->irq_dest_mode; entry->lo.fields_remap.int_type = apic->irq_delivery_mode; entry->hi.fields.vector = cfg->vector; diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index c259108ab6dd78fcb0514fd8d6f03240dc21b7d2..0d175aed1d92fb4203fbebf09c4d2fd1854bf871 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -737,6 +737,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) might_sleep(); + /* + * When memory encryption is active the device is likely not in a + * direct-mapped domain. Forbid using IOMMUv2 functionality for now. + */ + if (mem_encrypt_active()) + return -ENODEV; + if (!amd_iommu_v2_supported()) return -ENODEV; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 7196207be7eab29d118588cefa3e8ff1f44c1863..c192544e874b1799d425ecf030a48c4b08053079 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -903,7 +903,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) break; case CMDQ_OP_CFGI_CD: cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid); - /* Fallthrough */ + fallthrough; case CMDQ_OP_CFGI_STE: cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf); @@ -936,7 +936,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) break; case CMDQ_OP_TLBI_NH_ASID: cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); - /* Fallthrough */ + fallthrough; case CMDQ_OP_TLBI_S12_VMALL: cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); break; @@ -1036,7 +1036,6 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) */ return; case CMDQ_ERR_CERROR_ILL_IDX: - /* Fallthrough */ default: break; } @@ -3758,7 +3757,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) switch (FIELD_GET(IDR0_STALL_MODEL, reg)) { case IDR0_STALL_MODEL_FORCE: smmu->features |= ARM_SMMU_FEAT_STALL_FORCE; - /* Fallthrough */ + fallthrough; case IDR0_STALL_MODEL_STALL: smmu->features |= ARM_SMMU_FEAT_STALLS; } @@ -3778,7 +3777,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) switch (FIELD_GET(IDR0_TTF, reg)) { case IDR0_TTF_AARCH32_64: smmu->ias = 40; - /* Fallthrough */ + fallthrough; case IDR0_TTF_AARCH64: break; default: @@ -3875,7 +3874,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) default: dev_info(smmu->dev, "unknown output address size. 
Truncating to 48-bit\n"); - /* Fallthrough */ + fallthrough; case IDR5_OAS_48_BIT: smmu->oas = 48; } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 4959f5df21bd0714b5e88c2dc165c13ff16476eb..5141d49a046baa5ab80d408c06ebb15685c93712 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -1035,8 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size, if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !gfpflags_allow_blocking(gfp) && !coherent) - cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, - gfp); + page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr, + gfp, NULL); else cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs); if (!cpu_addr) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index e9864e52b0e96a5af1e2cff060996430cab1218f..87b17bac04c27c24622b825a4e1431817bc7cb16 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -123,29 +123,29 @@ static inline unsigned int level_to_offset_bits(int level) return (level - 1) * LEVEL_STRIDE; } -static inline int pfn_level_offset(unsigned long pfn, int level) +static inline int pfn_level_offset(u64 pfn, int level) { return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; } -static inline unsigned long level_mask(int level) +static inline u64 level_mask(int level) { - return -1UL << level_to_offset_bits(level); + return -1ULL << level_to_offset_bits(level); } -static inline unsigned long level_size(int level) +static inline u64 level_size(int level) { - return 1UL << level_to_offset_bits(level); + return 1ULL << level_to_offset_bits(level); } -static inline unsigned long align_to_level(unsigned long pfn, int level) +static inline u64 align_to_level(u64 pfn, int level) { return (pfn + level_size(level) - 1) & level_mask(level); } static inline unsigned long lvl_to_nr_pages(unsigned int lvl) { - return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH); + return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH); } /* VT-d pages must always be _smaller_ than MM pages. Otherwise things @@ -364,7 +364,6 @@ static int iommu_skip_te_disable; int intel_iommu_gfx_mapped; EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); -#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2)) struct device_domain_info *get_domain_info(struct device *dev) { @@ -374,8 +373,7 @@ struct device_domain_info *get_domain_info(struct device *dev) return NULL; info = dev_iommu_priv_get(dev); - if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO || - info == DEFER_DEVICE_DOMAIN_INFO)) + if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO)) return NULL; return info; @@ -742,11 +740,6 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, return &context[devfn]; } -static int iommu_dummy(struct device *dev) -{ - return dev_iommu_priv_get(dev) == DUMMY_DEVICE_DOMAIN_INFO; -} - static bool attach_deferred(struct device *dev) { return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO; @@ -779,6 +772,53 @@ is_downstream_to_pci_bridge(struct device *dev, struct device *bridge) return false; } +static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev) +{ + struct dmar_drhd_unit *drhd; + u32 vtbar; + int rc; + + /* We know that this device on this chipset has its own IOMMU. + * If we find it under a different IOMMU, then the BIOS is lying + * to us. Hope that the IOMMU for this device is actually + * disabled, and it needs no translation... 
+ */ + rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); + if (rc) { + /* "can't" happen */ + dev_info(&pdev->dev, "failed to run vt-d quirk\n"); + return false; + } + vtbar &= 0xffff0000; + + /* we know that this iommu should be at offset 0xa000 from vtbar */ + drhd = dmar_find_matched_drhd_unit(pdev); + if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) { + pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); + return true; + } + + return false; +} + +static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev) +{ + if (!iommu || iommu->drhd->ignored) + return true; + + if (dev_is_pci(dev)) { + struct pci_dev *pdev = to_pci_dev(dev); + + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB && + quirk_ioat_snb_local_iommu(pdev)) + return true; + } + + return false; +} + struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) { struct dmar_drhd_unit *drhd = NULL; @@ -788,7 +828,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) u16 segment = 0; int i; - if (!dev || iommu_dummy(dev)) + if (!dev) return NULL; if (dev_is_pci(dev)) { @@ -805,7 +845,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) dev = &ACPI_COMPANION(dev)->dev; rcu_read_lock(); - for_each_active_iommu(iommu, drhd) { + for_each_iommu(iommu, drhd) { if (pdev && segment != drhd->segment) continue; @@ -841,6 +881,9 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) } iommu = NULL; out: + if (iommu_is_dummy(iommu, dev)) + iommu = NULL; + rcu_read_unlock(); return iommu; @@ -2447,7 +2490,7 @@ struct dmar_domain *find_domain(struct device *dev) { struct device_domain_info *info; - if (unlikely(attach_deferred(dev) || iommu_dummy(dev))) + if (unlikely(attach_deferred(dev))) return NULL; /* No lock here, assumes no domain exit in normal case */ @@ -3989,35 +4032,6 @@ static void __init iommu_exit_mempool(void) iova_cache_put(); } -static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) -{ - struct dmar_drhd_unit *drhd; - u32 vtbar; - int rc; - - /* We know that this device on this chipset has its own IOMMU. - * If we find it under a different IOMMU, then the BIOS is lying - * to us. Hope that the IOMMU for this device is actually - * disabled, and it needs no translation... - */ - rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); - if (rc) { - /* "can't" happen */ - dev_info(&pdev->dev, "failed to run vt-d quirk\n"); - return; - } - vtbar &= 0xffff0000; - - /* we know that the this iommu should be at offset 0xa000 from vtbar */ - drhd = dmar_find_matched_drhd_unit(pdev); - if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) { - pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"); - add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); - dev_iommu_priv_set(&pdev->dev, DUMMY_DEVICE_DOMAIN_INFO); - } -} -DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu); - static void __init init_no_remapping_devices(void) { struct dmar_drhd_unit *drhd; @@ -4049,12 +4063,8 @@ static void __init init_no_remapping_devices(void) /* This IOMMU has *only* gfx devices. 
Either bypass it or set the gfx_mapped flag, as appropriate */ drhd->gfx_dedicated = 1; - if (!dmar_map_gfx) { + if (!dmar_map_gfx) drhd->ignored = 1; - for_each_active_dev_scope(drhd->devices, - drhd->devices_cnt, i, dev) - dev_iommu_priv_set(dev, DUMMY_DEVICE_DOMAIN_INFO); - } } } @@ -5070,7 +5080,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) switch (type) { case IOMMU_DOMAIN_DMA: - /* fallthrough */ case IOMMU_DOMAIN_UNMANAGED: dmar_domain = alloc_domain(0); if (!dmar_domain) { diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index 23583b0e66a5e12dc18cbb79dbc1b8ffa6dc7552..8f4ce72570ce34beb8d16c624cd6f7938771ff7d 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -508,12 +508,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu) /* Enable interrupt-remapping */ iommu->gcmd |= DMA_GCMD_IRE; - iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_IRES), sts); + /* Block compatibility-format MSIs */ + if (sts & DMA_GSTS_CFIS) { + iommu->gcmd &= ~DMA_GCMD_CFI; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, !(sts & DMA_GSTS_CFIS), sts); + } + /* * With CFI clear in the Global Command register, we should be * protected from dangerous (i.e. compatibility) interrupts diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index b4da396cce60401427d4e35a8aa552c3e47e56fc..2bfdd57348443221096a29005ee3a8a63abc0315 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -440,7 +440,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev, default: dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n", mem->subtype); - /* Fall-through */ + fallthrough; case VIRTIO_IOMMU_RESV_MEM_T_RESERVED: region = iommu_alloc_resv_region(start, size, 0, IOMMU_RESV_RESERVED); diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index bb70b7177f94e770956a3f079e60ad62f7c9d782..bfc9719dbcdc31c423539c83c9c19517a291eb09 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -425,7 +425,7 @@ config GOLDFISH_PIC for Goldfish based virtual platforms. 
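[Editor's note: an illustrative skeleton, not the kernel macro. The CFIS change in irq_remapping.c above follows the usual VT-d control-register protocol: only touch the CFI bit if the status register says compatibility-format interrupts are currently enabled, write the new command, then poll status until the hardware acknowledges. Register names and the retry policy below are stand-ins:

#include <stdint.h>
#include <stdbool.h>

#define CFI_BIT (1u << 23)	/* invented bit position for the sketch */

static bool disable_cfi(volatile uint32_t *gcmd, volatile uint32_t *gsts)
{
	unsigned long tries = 1000000;

	if (!(*gsts & CFI_BIT))
		return true;		/* already blocked, nothing to do */

	*gcmd &= ~CFI_BIT;		/* request: stop accepting compat MSIs */
	while (*gsts & CFI_BIT)		/* IOMMU_WAIT_OP() in the kernel */
		if (!--tries)
			return false;

	return true;
}

]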
config QCOM_PDC - tristate "QCOM PDC" + bool "QCOM PDC" depends on ARCH_QCOM select IRQ_DOMAIN_HIERARCHY help diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 95f097448f9719384763c67819c7e93a074c7779..548de7538632a0491ec438f8f67dd18ef750d940 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -2737,7 +2737,7 @@ static bool allocate_vpe_l2_table(int cpu, u32 id) switch (gpsz) { default: WARN_ON(1); - /* fall through */ + fallthrough; case GIC_PAGE_SIZE_4K: psz = SZ_4K; break; @@ -2832,7 +2832,7 @@ static int allocate_vpe_l1_table(void) switch (gpsz) { default: gpsz = GIC_PAGE_SIZE_4K; - /* fall through */ + fallthrough; case GIC_PAGE_SIZE_4K: psz = SZ_4K; break; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 324f280ff606614d5d204ee54194f571f924113b..850842f27beeec8229c973ff2602618ba0ed74d8 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -965,10 +965,10 @@ static void gic_cpu_sys_reg_init(void) case 7: write_gicreg(0, ICC_AP0R3_EL1); write_gicreg(0, ICC_AP0R2_EL1); - /* Fall through */ + fallthrough; case 6: write_gicreg(0, ICC_AP0R1_EL1); - /* Fall through */ + fallthrough; case 5: case 4: write_gicreg(0, ICC_AP0R0_EL1); @@ -982,10 +982,10 @@ static void gic_cpu_sys_reg_init(void) case 7: write_gicreg(0, ICC_AP1R3_EL1); write_gicreg(0, ICC_AP1R2_EL1); - /* Fall through */ + fallthrough; case 6: write_gicreg(0, ICC_AP1R1_EL1); - /* Fall through */ + fallthrough; case 5: case 4: write_gicreg(0, ICC_AP1R0_EL1); diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index 4f74c15c475557ab2c2c6dd88634df077109e7d6..7031ef44de4f30c0bdbc3315e1b760dddbb68055 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c @@ -259,7 +259,7 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node, case 4: writel_relaxed(~0, reg + GPC_IMR1_CORE2); writel_relaxed(~0, reg + GPC_IMR1_CORE3); - /* fall through */ + fallthrough; case 2: writel_relaxed(~0, reg + GPC_IMR1_CORE0); writel_relaxed(~0, reg + GPC_IMR1_CORE1); diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c index 9f3da4260ca658e9a711feeefef1ecc00ce99dde..b61a8901ef72211df72384cd14475267356fa3f4 100644 --- a/drivers/irqchip/irq-ingenic.c +++ b/drivers/irqchip/irq-ingenic.c @@ -125,7 +125,7 @@ static int __init ingenic_intc_of_init(struct device_node *node, irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK); } - if (request_irq(parent_irq, intc_cascade, 0, + if (request_irq(parent_irq, intc_cascade, IRQF_NO_SUSPEND, "SoC intc cascade interrupt", NULL)) pr_err("Failed to register SoC intc cascade interrupt\n"); return 0; diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index aacfa012c082fb8321cc1c8e0eec546153d87832..215885962bb0abd7e164152d55e2bdb5e80c32e7 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -480,7 +480,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, case GIC_LOCAL_INT_TIMER: /* CONFIG_MIPS_CMP workaround (see __gic_init) */ map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin; - /* fall-through */ + fallthrough; case GIC_LOCAL_INT_PERFCTR: case GIC_LOCAL_INT_FDC: /* diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c index 62a61275aaa315ff39331f9700996f89a387364e..69ba8ce3c17856be68c9330323f59acb00d55f78 100644 --- a/drivers/irqchip/irq-mtk-cirq.c +++ b/drivers/irqchip/irq-mtk-cirq.c @@ -295,6 +295,4 @@ static int 
__init mtk_cirq_of_init(struct device_node *node, return ret; } -IRQCHIP_PLATFORM_DRIVER_BEGIN(mtk_cirq) -IRQCHIP_MATCH("mediatek,mtk-cirq", mtk_cirq_of_init) -IRQCHIP_PLATFORM_DRIVER_END(mtk_cirq) +IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init); diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c index 7299c5ab4d107819d68d63fa6e18bc53af1cbaba..6ff98b87e5c043344dda7d2214cf67978112ff65 100644 --- a/drivers/irqchip/irq-mtk-sysirq.c +++ b/drivers/irqchip/irq-mtk-sysirq.c @@ -231,6 +231,4 @@ static int __init mtk_sysirq_of_init(struct device_node *node, kfree(chip_data); return ret; } -IRQCHIP_PLATFORM_DRIVER_BEGIN(mtk_sysirq) -IRQCHIP_MATCH("mediatek,mt6577-sysirq", mtk_sysirq_of_init) -IRQCHIP_PLATFORM_DRIVER_END(mtk_sysirq) +IRQCHIP_DECLARE(mtk_sysirq, "mediatek,mt6577-sysirq", mtk_sysirq_of_init); diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 03a36be757d8111234cf534f6215addbdf96e510..0c2c61db26b45dfaf1bb995c89e9cdcdeb276431 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -416,6 +416,16 @@ static void stm32_irq_ack(struct irq_data *d) irq_gc_unlock(gc); } +/* directly set the target bit without reading first. */ +static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg) +{ + struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + void __iomem *base = chip_data->host_data->base; + u32 val = BIT(d->hwirq % IRQS_PER_BANK); + + writel_relaxed(val, base + reg); +} + static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg) { struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); @@ -449,9 +459,9 @@ static void stm32_exti_h_eoi(struct irq_data *d) raw_spin_lock(&chip_data->rlock); - stm32_exti_set_bit(d, stm32_bank->rpr_ofst); + stm32_exti_write_bit(d, stm32_bank->rpr_ofst); if (stm32_bank->fpr_ofst != UNDEF_REG) - stm32_exti_set_bit(d, stm32_bank->fpr_ofst); + stm32_exti_write_bit(d, stm32_bank->fpr_ofst); raw_spin_unlock(&chip_data->rlock); diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c index b7cc5d6580d8fc89cea3e16d21345c396749176b..d4e97605456bba1c3958fb0affb276227b044114 100644 --- a/drivers/irqchip/irq-ti-sci-inta.c +++ b/drivers/irqchip/irq-ti-sci-inta.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -83,6 +84,7 @@ struct ti_sci_inta_vint_desc { * @vint_mutex: Mutex to protect vint_list * @base: Base address of the memory mapped IO registers * @pdev: Pointer to platform device. + * @ti_sci_id: TI-SCI device identifier */ struct ti_sci_inta_irq_domain { const struct ti_sci_handle *sci; @@ -93,6 +95,7 @@ struct ti_sci_inta_irq_domain { struct mutex vint_mutex; void __iomem *base; struct platform_device *pdev; + u32 ti_sci_id; }; #define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \ @@ -128,6 +131,37 @@ static void ti_sci_inta_irq_handler(struct irq_desc *desc) chained_irq_exit(irq_desc_get_chip(desc), desc); } +/** + * ti_sci_inta_xlate_irq() - Translate hwirq to parent's hwirq. + * @inta: IRQ domain corresponding to Interrupt Aggregator + * @vint_id: Hardware irq corresponding to the above irq domain + * + * Return parent irq number if translation is available else -ENOENT. 
+ */ +static int ti_sci_inta_xlate_irq(struct ti_sci_inta_irq_domain *inta, + u16 vint_id) +{ + struct device_node *np = dev_of_node(&inta->pdev->dev); + u32 base, parent_base, size; + const __be32 *range; + int len; + + range = of_get_property(np, "ti,interrupt-ranges", &len); + if (!range) + return vint_id; + + for (len /= sizeof(*range); len >= 3; len -= 3) { + base = be32_to_cpu(*range++); + parent_base = be32_to_cpu(*range++); + size = be32_to_cpu(*range++); + + if (base <= vint_id && vint_id < base + size) + return vint_id - base + parent_base; + } + + return -ENOENT; +} + /** * ti_sci_inta_alloc_parent_irq() - Allocate parent irq to Interrupt aggregator * @domain: IRQ domain corresponding to Interrupt Aggregator @@ -139,30 +173,52 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom struct ti_sci_inta_irq_domain *inta = domain->host_data; struct ti_sci_inta_vint_desc *vint_desc; struct irq_fwspec parent_fwspec; + struct device_node *parent_node; unsigned int parent_virq; - u16 vint_id; + u16 vint_id; + int p_hwirq, ret; vint_id = ti_sci_get_free_resource(inta->vint); if (vint_id == TI_SCI_RESOURCE_NULL) return ERR_PTR(-EINVAL); + p_hwirq = ti_sci_inta_xlate_irq(inta, vint_id); + if (p_hwirq < 0) { + ret = p_hwirq; + goto free_vint; + } + vint_desc = kzalloc(sizeof(*vint_desc), GFP_KERNEL); - if (!vint_desc) - return ERR_PTR(-ENOMEM); + if (!vint_desc) { + ret = -ENOMEM; + goto free_vint; + } vint_desc->domain = domain; vint_desc->vint_id = vint_id; INIT_LIST_HEAD(&vint_desc->list); - parent_fwspec.fwnode = of_node_to_fwnode(of_irq_find_parent(dev_of_node(&inta->pdev->dev))); - parent_fwspec.param_count = 2; - parent_fwspec.param[0] = inta->pdev->id; - parent_fwspec.param[1] = vint_desc->vint_id; + parent_node = of_irq_find_parent(dev_of_node(&inta->pdev->dev)); + parent_fwspec.fwnode = of_node_to_fwnode(parent_node); + + if (of_device_is_compatible(parent_node, "arm,gic-v3")) { + /* Parent is GIC */ + parent_fwspec.param_count = 3; + parent_fwspec.param[0] = 0; + parent_fwspec.param[1] = p_hwirq - 32; + parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH; + } else { + /* Parent is Interrupt Router */ + parent_fwspec.param_count = 1; + parent_fwspec.param[0] = p_hwirq; + } parent_virq = irq_create_fwspec_mapping(&parent_fwspec); if (parent_virq == 0) { - kfree(vint_desc); - return ERR_PTR(-EINVAL); + dev_err(&inta->pdev->dev, "Parent IRQ allocation failed\n"); + ret = -EINVAL; + goto free_vint_desc; + } vint_desc->parent_virq = parent_virq; @@ -171,6 +227,11 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom ti_sci_inta_irq_handler, vint_desc); return vint_desc; +free_vint_desc: + kfree(vint_desc); +free_vint: + ti_sci_release_resource(inta->vint, vint_id); + return ERR_PTR(ret); } /** @@ -202,7 +263,7 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta err = inta->sci->ops.rm_irq_ops.set_event_map(inta->sci, dev_id, dev_index, - inta->pdev->id, + inta->ti_sci_id, vint_desc->vint_id, event_desc->global_event, free_bit); @@ -299,7 +360,7 @@ static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc, inta->sci->ops.rm_irq_ops.free_event_map(inta->sci, HWIRQ_TO_DEVID(hwirq), HWIRQ_TO_IRQID(hwirq), - inta->pdev->id, + inta->ti_sci_id, vint_desc->vint_id, event_desc->global_event, event_desc->vint_bit); @@ -547,21 +608,21 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev) return ret; } - ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &pdev->id); + ret = 
of_property_read_u32(dev->of_node, "ti,sci-dev-id", &inta->ti_sci_id); if (ret) { dev_err(dev, "missing 'ti,sci-dev-id' property\n"); return -EINVAL; } - inta->vint = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id, - "ti,sci-rm-range-vint"); + inta->vint = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id, + TI_SCI_RESASG_SUBTYPE_IA_VINT); if (IS_ERR(inta->vint)) { dev_err(dev, "VINT resource allocation failed\n"); return PTR_ERR(inta->vint); } - inta->global_event = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id, - "ti,sci-rm-range-global-event"); + inta->global_event = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id, + TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT); if (IS_ERR(inta->global_event)) { dev_err(dev, "Global event resource allocation failed\n"); return PTR_ERR(inta->global_event); @@ -592,6 +653,8 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev) INIT_LIST_HEAD(&inta->vint_list); mutex_init(&inta->vint_mutex); + dev_info(dev, "Interrupt Aggregator domain %d created\n", pdev->id); + return 0; } diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c index 5ea148faf2abc4f27bfca769ae6574b13903ce96..cbc1758228d9e91a98dd2ef8aaa13ff060d85907 100644 --- a/drivers/irqchip/irq-ti-sci-intr.c +++ b/drivers/irqchip/irq-ti-sci-intr.c @@ -17,29 +17,20 @@ #include #include -#define TI_SCI_DEV_ID_MASK 0xffff -#define TI_SCI_DEV_ID_SHIFT 16 -#define TI_SCI_IRQ_ID_MASK 0xffff -#define TI_SCI_IRQ_ID_SHIFT 0 -#define HWIRQ_TO_DEVID(hwirq) (((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \ - (TI_SCI_DEV_ID_MASK)) -#define HWIRQ_TO_IRQID(hwirq) ((hwirq) & (TI_SCI_IRQ_ID_MASK)) -#define TO_HWIRQ(dev, index) ((((dev) & TI_SCI_DEV_ID_MASK) << \ - TI_SCI_DEV_ID_SHIFT) | \ - ((index) & TI_SCI_IRQ_ID_MASK)) - /** * struct ti_sci_intr_irq_domain - Structure representing a TISCI based * Interrupt Router IRQ domain. * @sci: Pointer to TISCI handle - * @dst_irq: TISCI resource pointer representing GIC irq controller. - * @dst_id: TISCI device ID of the GIC irq controller. + * @out_irqs: TISCI resource pointer representing INTR irqs. + * @dev: Struct device pointer. + * @ti_sci_id: TI-SCI device identifier * @type: Specifies the trigger type supported by this Interrupt Router */ struct ti_sci_intr_irq_domain { const struct ti_sci_handle *sci; - struct ti_sci_resource *dst_irq; - u32 dst_id; + struct ti_sci_resource *out_irqs; + struct device *dev; + u32 ti_sci_id; u32 type; }; @@ -70,15 +61,44 @@ static int ti_sci_intr_irq_domain_translate(struct irq_domain *domain, { struct ti_sci_intr_irq_domain *intr = domain->host_data; - if (fwspec->param_count != 2) + if (fwspec->param_count != 1) return -EINVAL; - *hwirq = TO_HWIRQ(fwspec->param[0], fwspec->param[1]); + *hwirq = fwspec->param[0]; *type = intr->type; return 0; } +/** + * ti_sci_intr_xlate_irq() - Translate hwirq to parent's hwirq. + * @intr: IRQ domain corresponding to Interrupt Router + * @irq: Hardware irq corresponding to the above irq domain + * + * Return parent irq number if translation is available else -ENOENT. 
+ */ +static int ti_sci_intr_xlate_irq(struct ti_sci_intr_irq_domain *intr, u32 irq) +{ + struct device_node *np = dev_of_node(intr->dev); + u32 base, pbase, size, len; + const __be32 *range; + + range = of_get_property(np, "ti,interrupt-ranges", &len); + if (!range) + return irq; + + for (len /= sizeof(*range); len >= 3; len -= 3) { + base = be32_to_cpu(*range++); + pbase = be32_to_cpu(*range++); + size = be32_to_cpu(*range++); + + if (base <= irq && irq < base + size) + return irq - base + pbase; + } + + return -ENOENT; +} + /** * ti_sci_intr_irq_domain_free() - Free the specified IRQs from the domain. * @domain: Domain to which the irqs belong @@ -89,66 +109,76 @@ static void ti_sci_intr_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct ti_sci_intr_irq_domain *intr = domain->host_data; - struct irq_data *data, *parent_data; - u16 dev_id, irq_index; + struct irq_data *data; + int out_irq; - parent_data = irq_domain_get_irq_data(domain->parent, virq); data = irq_domain_get_irq_data(domain, virq); - irq_index = HWIRQ_TO_IRQID(data->hwirq); - dev_id = HWIRQ_TO_DEVID(data->hwirq); + out_irq = (uintptr_t)data->chip_data; - intr->sci->ops.rm_irq_ops.free_irq(intr->sci, dev_id, irq_index, - intr->dst_id, parent_data->hwirq); - ti_sci_release_resource(intr->dst_irq, parent_data->hwirq); + intr->sci->ops.rm_irq_ops.free_irq(intr->sci, + intr->ti_sci_id, data->hwirq, + intr->ti_sci_id, out_irq); + ti_sci_release_resource(intr->out_irqs, out_irq); irq_domain_free_irqs_parent(domain, virq, 1); irq_domain_reset_irq_data(data); } /** - * ti_sci_intr_alloc_gic_irq() - Allocate GIC specific IRQ + * ti_sci_intr_alloc_parent_irq() - Allocate parent IRQ * @domain: Pointer to the interrupt router IRQ domain * @virq: Corresponding Linux virtual IRQ number * @hwirq: Corresponding hwirq for the IRQ within this IRQ domain * - * Returns 0 if all went well else appropriate error pointer. + * Returns parent irq if all went well else an appropriate error value. 
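[Editor's note: a minimal sketch, not part of the patch. Both ti_sci_inta_xlate_irq() and ti_sci_intr_xlate_irq() above walk the same "ti,interrupt-ranges" device-tree property: a list of (base, parent_base, size) triplets, each mapping local outputs [base, base + size) onto parent inputs [parent_base, parent_base + size). The loop in plain C, with host-endian integers standing in for the __be32 cells:

#include <stdio.h>

static int xlate_irq(const unsigned int *ranges, int ncells, unsigned int irq)
{
	for (; ncells >= 3; ranges += 3, ncells -= 3) {
		unsigned int base = ranges[0];
		unsigned int pbase = ranges[1];
		unsigned int size = ranges[2];

		if (base <= irq && irq < base + size)
			return irq - base + pbase;
	}

	return -1;	/* the drivers return -ENOENT */
}

int main(void)
{
	/* one range: local outputs 16..23 feed parent inputs 96..103 */
	unsigned int ranges[] = { 16, 96, 8 };

	printf("%d %d\n", xlate_irq(ranges, 3, 18),	/* 98 */
	       xlate_irq(ranges, 3, 40));		/* -1 */
	return 0;
}

]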
*/ -static int ti_sci_intr_alloc_gic_irq(struct irq_domain *domain, - unsigned int virq, u32 hwirq) +static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain, + unsigned int virq, u32 hwirq) { struct ti_sci_intr_irq_domain *intr = domain->host_data; + struct device_node *parent_node; struct irq_fwspec fwspec; - u16 dev_id, irq_index; - u16 dst_irq; - int err; - - dev_id = HWIRQ_TO_DEVID(hwirq); - irq_index = HWIRQ_TO_IRQID(hwirq); + u16 out_irq, p_hwirq; + int err = 0; - dst_irq = ti_sci_get_free_resource(intr->dst_irq); - if (dst_irq == TI_SCI_RESOURCE_NULL) + out_irq = ti_sci_get_free_resource(intr->out_irqs); + if (out_irq == TI_SCI_RESOURCE_NULL) return -EINVAL; - fwspec.fwnode = domain->parent->fwnode; - fwspec.param_count = 3; - fwspec.param[0] = 0; /* SPI */ - fwspec.param[1] = dst_irq - 32; /* SPI offset */ - fwspec.param[2] = intr->type; + p_hwirq = ti_sci_intr_xlate_irq(intr, out_irq); + if (p_hwirq < 0) + goto err_irqs; + + parent_node = of_irq_find_parent(dev_of_node(intr->dev)); + fwspec.fwnode = of_node_to_fwnode(parent_node); + + if (of_device_is_compatible(parent_node, "arm,gic-v3")) { + /* Parent is GIC */ + fwspec.param_count = 3; + fwspec.param[0] = 0; /* SPI */ + fwspec.param[1] = p_hwirq - 32; /* SPI offset */ + fwspec.param[2] = intr->type; + } else { + /* Parent is Interrupt Router */ + fwspec.param_count = 1; + fwspec.param[0] = p_hwirq; + } err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); if (err) goto err_irqs; - err = intr->sci->ops.rm_irq_ops.set_irq(intr->sci, dev_id, irq_index, - intr->dst_id, dst_irq); + err = intr->sci->ops.rm_irq_ops.set_irq(intr->sci, + intr->ti_sci_id, hwirq, + intr->ti_sci_id, out_irq); if (err) goto err_msg; - return 0; + return p_hwirq; err_msg: irq_domain_free_irqs_parent(domain, virq, 1); err_irqs: - ti_sci_release_resource(intr->dst_irq, dst_irq); + ti_sci_release_resource(intr->out_irqs, out_irq); return err; } @@ -168,18 +198,19 @@ static int ti_sci_intr_irq_domain_alloc(struct irq_domain *domain, struct irq_fwspec *fwspec = data; unsigned long hwirq; unsigned int flags; - int err; + int err, p_hwirq; err = ti_sci_intr_irq_domain_translate(domain, fwspec, &hwirq, &flags); if (err) return err; - err = ti_sci_intr_alloc_gic_irq(domain, virq, hwirq); - if (err) - return err; + p_hwirq = ti_sci_intr_alloc_parent_irq(domain, virq, hwirq); + if (p_hwirq < 0) + return p_hwirq; irq_domain_set_hwirq_and_chip(domain, virq, hwirq, - &ti_sci_intr_irq_chip, NULL); + &ti_sci_intr_irq_chip, + (void *)(uintptr_t)p_hwirq); return 0; } @@ -214,6 +245,7 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev) if (!intr) return -ENOMEM; + intr->dev = dev; ret = of_property_read_u32(dev_of_node(dev), "ti,intr-trigger-type", &intr->type); if (ret) { @@ -230,19 +262,19 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev) return ret; } - ret = of_property_read_u32(dev_of_node(dev), "ti,sci-dst-id", - &intr->dst_id); + ret = of_property_read_u32(dev_of_node(dev), "ti,sci-dev-id", + &intr->ti_sci_id); if (ret) { - dev_err(dev, "missing 'ti,sci-dst-id' property\n"); + dev_err(dev, "missing 'ti,sci-dev-id' property\n"); return -EINVAL; } - intr->dst_irq = devm_ti_sci_get_of_resource(intr->sci, dev, - intr->dst_id, - "ti,sci-rm-range-girq"); - if (IS_ERR(intr->dst_irq)) { + intr->out_irqs = devm_ti_sci_get_resource(intr->sci, dev, + intr->ti_sci_id, + TI_SCI_RESASG_SUBTYPE_IR_OUTPUT); + if (IS_ERR(intr->out_irqs)) { dev_err(dev, "Destination irq resource allocation failed\n"); - return 
PTR_ERR(intr->dst_irq); + return PTR_ERR(intr->out_irqs); } domain = irq_domain_add_hierarchy(parent_domain, 0, 0, dev_of_node(dev), @@ -252,6 +284,8 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev) return -ENOMEM; } + dev_info(dev, "Interrupt Router %d domain created\n", intr->ti_sci_id); + return 0; } diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index bc235db8a4c5bcf246ad2eff6342b40fdb19cc8b..e46036374227297ed9aa1185724df3a735d8bbec 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -455,7 +455,7 @@ static void __init __vic_init(void __iomem *base, int parent_irq, int irq_start, return; default: printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n"); - /* fall through */ + fallthrough; case AMBA_VENDOR_ARM: break; } diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c index 1bb0e36c2bf3dbd5ee07f9fa1588d956deb60177..d2341153e1814d12623c7b14ea62e939031a7e66 100644 --- a/drivers/irqchip/irqchip.c +++ b/drivers/irqchip/irqchip.c @@ -52,7 +52,7 @@ int platform_irqchip_probe(struct platform_device *pdev) * interrupt controller. The actual initialization callback of this * interrupt controller can check for specific domains as necessary. */ - if (par_np && !irq_find_matching_host(np, DOMAIN_BUS_ANY)) + if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) return -EPROBE_DEFER; return irq_init_cb(np, par_np); diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c index c1c5dfad57ccd0d729d578a777b281739cc5201d..6ae9e1f0819da1615f80fbfb6eda00c348b9c256 100644 --- a/drivers/irqchip/qcom-pdc.c +++ b/drivers/irqchip/qcom-pdc.c @@ -11,11 +11,9 @@ #include #include #include -#include #include #include #include -#include #include #include #include @@ -432,8 +430,4 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent) return ret; } -IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_pdc) -IRQCHIP_MATCH("qcom,pdc", qcom_pdc_init) -IRQCHIP_PLATFORM_DRIVER_END(qcom_pdc) -MODULE_DESCRIPTION("Qualcomm Technologies, Inc. 
Power Domain Controller"); -MODULE_LICENSE("GPL v2"); +IRQCHIP_DECLARE(qcom_pdc, "qcom,pdc", qcom_pdc_init); diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c index ecc1ef6c386d8321082ffc7b5dc1afc0fc6714f5..f68569bfef7a711a53b1d9d48ba83c335c8b3fe7 100644 --- a/drivers/isdn/hardware/mISDN/avmfritz.c +++ b/drivers/isdn/hardware/mISDN/avmfritz.c @@ -348,7 +348,7 @@ modehdlc(struct bchannel *bch, int protocol) switch (protocol) { case -1: /* used for init */ bch->state = -1; - /* fall through */ + fallthrough; case ISDN_P_NONE: if (bch->state == ISDN_P_NONE) break; diff --git a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h index b0d772340e163d85217c2e19165b027c83a8984b..448ded8f9d24e384ae5f6fb11bf44a3a543703d7 100644 --- a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h +++ b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h @@ -121,7 +121,6 @@ setup_embedded(struct hfc_multi *hc, struct hm_map *m) case HFC_IO_MODE_EMBSD: test_and_set_bit(HFC_CHIP_EMBSD, &hc->chip); hc->slots = 128; /* required */ - /* fall through */ hc->HFC_outb = HFC_outb_embsd; hc->HFC_inb = HFC_inb_embsd; hc->HFC_inw = HFC_inw_embsd; diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 904a4f4c5ff9eeb032b2b97b237fe923c65f698f..56bd2e9db6ed6a1bfe92173e0c987d0ca527b3b9 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -1280,7 +1280,7 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol) case (-1): /* used for init */ bch->state = -1; bch->nr = bc; - /* fall through */ + fallthrough; case (ISDN_P_NONE): if (bch->state == ISDN_P_NONE) return 0; diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index 4274906f86547f6d9b6cf9288f99233b960678f7..70061991915a5833acd33d6354ba08e60177ee20 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -695,7 +695,7 @@ hfcsusb_setup_bch(struct bchannel *bch, int protocol) switch (protocol) { case (-1): /* used for init */ bch->state = -1; - /* fall through */ + fallthrough; case (ISDN_P_NONE): if (bch->state == ISDN_P_NONE) return 0; /* already in idle state */ diff --git a/drivers/isdn/hardware/mISDN/isdnhdlc.c b/drivers/isdn/hardware/mISDN/isdnhdlc.c index 9fea16ed3dd86e71d6d752da1be914806073143a..985367e6711d4d8b4a22785f077a72874cac48af 100644 --- a/drivers/isdn/hardware/mISDN/isdnhdlc.c +++ b/drivers/isdn/hardware/mISDN/isdnhdlc.c @@ -397,7 +397,7 @@ int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src, u16 slen, dsize--; break; } - /* fall through */ + fallthrough; case HDLC_SENDFLAG_ONE: if (hdlc->bit_shift == 8) { hdlc->cbin = hdlc->ffvalue >> diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c index f4cb29766888457d0c06bdad1af9cda9034af17f..a16c7a2a7f3d0d22bf03713a58640db143440a25 100644 --- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c +++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c @@ -875,7 +875,7 @@ release_card(struct inf_hw *card) { release_card(card->sc[i]); card->sc[i] = NULL; } - /* fall through */ + fallthrough; default: pci_disable_device(card->pdev); pci_set_drvdata(card->pdev, NULL); diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c index 11e8c7d8b6e89d361f8709251722f8d2e9c598c1..56943409b60d944a21bb90e8d73b72286be7a16a 100644 --- a/drivers/isdn/hardware/mISDN/mISDNisar.c +++ b/drivers/isdn/hardware/mISDN/mISDNisar.c 
@@ -957,7 +957,7 @@ isar_pump_statev_fax(struct isar_ch *ch, u8 devt) { break; case PCTRL_CMD_FTM: p1 = 2; - /* fall through */ + fallthrough; case PCTRL_CMD_FTH: send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_SILON, 1, &p1); @@ -1163,7 +1163,7 @@ setup_pump(struct isar_ch *ch) { send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG, PMOD_DTMF, 1, param); } - /* fall through */ + fallthrough; case ISDN_P_B_MODEM_ASYNC: ctrl = PMOD_DATAMODEM; if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) { @@ -1255,7 +1255,7 @@ setup_iom2(struct isar_ch *ch) { case ISDN_P_B_MODEM_ASYNC: case ISDN_P_B_T30_FAX: cmsb |= IOM_CTRL_RCV; - /* fall through */ + fallthrough; case ISDN_P_B_L2DTMF: if (test_bit(FLG_DTMFSEND, &ch->bch.Flags)) cmsb |= IOM_CTRL_RCV; @@ -1548,7 +1548,7 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb) ich->is->name, hh->id); ret = -EINVAL; } - /* fall through */ + fallthrough; default: pr_info("%s: %s unknown prim(%x,%x)\n", ich->is->name, __func__, hh->prim, hh->id); diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index 27aa32914425d5d3b39e5047237c71e2bc5ce486..c2f76f3986134e4dc2f82a49195be0fbd6789314 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -528,7 +528,7 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch, rq.protocol = ISDN_P_NT_S0; if (dev->Dprotocols & (1 << ISDN_P_NT_E1)) rq.protocol = ISDN_P_NT_E1; - /* fall through */ + fallthrough; case ISDN_P_LAPD_TE: ch->recv = mISDN_queue_message; ch->peer = &dev->D.st->own; diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index b413bafe93fddd97a9ee85ba29b1007490ad8b49..97c68731406b34d32f469976e38a0ffdab0cbaba 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -301,7 +301,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type) switch (type) { case PBLK_WRITE: kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap); - /* fall through */ + fallthrough; case PBLK_WRITE_INT: pool = &pblk->w_rq_pool; break; diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c index 75482eeab2c4da08f7061808ec44d3022686427b..994ba5cb367898060076cdd52a47d043f9380a00 100644 --- a/drivers/macintosh/adbhid.c +++ b/drivers/macintosh/adbhid.c @@ -881,7 +881,7 @@ adbhid_input_register(int id, int default_id, int original_handler_id, } if (hid->name[0]) break; - /* else fall through */ + fallthrough; default: pr_info("Trying to register unknown ADB device to input layer.\n"); diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 23f1f41c86029092a01d662c1a362552cffa3e04..96684581a25df174b3dc56218a36730e27f246a7 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -852,7 +852,7 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd) break; case SMU_I2C_TRANSFER_COMBINED: cmd->info.devaddr &= 0xfe; - /* fall through */ + fallthrough; case SMU_I2C_TRANSFER_STDSUB: if (cmd->info.sublen > 3) return -EINVAL; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 77fbfd52edcf99d6e3a35d2b9adc9d2b638c6088..c1227bdb57e7f110b32025248431c545da2bd170 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -608,7 +608,7 @@ static void do_journal_discard(struct cache *ca) ca->sb.njournal_buckets; atomic_set(&ja->discard_in_flight, DISCARD_READY); - /* fallthrough */ + fallthrough; case DISCARD_READY: if (ja->discard_idx == ja->last_idx) diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 
62fb917f7a4f08231656b1abb6fa18594012f7ff..ae380bc3992e3c5caaca7945ee4ccc71dae3debe 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -33,27 +33,27 @@ int bch_ ## name ## _h(const char *cp, type *res) \ case 'y': \ case 'z': \ u++; \ - /* fall through */ \ + fallthrough; \ case 'e': \ u++; \ - /* fall through */ \ + fallthrough; \ case 'p': \ u++; \ - /* fall through */ \ + fallthrough; \ case 't': \ u++; \ - /* fall through */ \ + fallthrough; \ case 'g': \ u++; \ - /* fall through */ \ + fallthrough; \ case 'm': \ u++; \ - /* fall through */ \ + fallthrough; \ case 'k': \ u++; \ if (e++ == cp) \ return -EINVAL; \ - /* fall through */ \ + fallthrough; \ case '\n': \ case '\0': \ if (*e == '\n') \ diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 151aa95775be2daee11c7721eb62dde28ec702b1..af6d4f898e4c1de8d9b72f053beb454d5617a1e8 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, CACHE_MAX_CONCURRENT_LOCKS); if (IS_ERR(cmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(cmd->bm); + r = PTR_ERR(cmd->bm); + cmd->bm = NULL; + return r; } r = __open_or_format_metadata(cmd, may_format_device); - if (r) + if (r) { dm_block_manager_destroy(cmd->bm); + cmd->bm = NULL; + } return r; } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 14896072125464ea9b3b9123f8b3fe4677bd8616..380386c36921f799a236f8d2701f3314f8b8e4fc 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -739,7 +739,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64)); struct skcipher_request *req; struct scatterlist src, dst; - struct crypto_wait wait; + DECLARE_CRYPTO_WAIT(wait); int err; req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO); @@ -936,7 +936,7 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d u8 *es, *ks, *data, *data2, *data_offset; struct skcipher_request *req; struct scatterlist *sg, *sg2, src, dst; - struct crypto_wait wait; + DECLARE_CRYPTO_WAIT(wait); int i, r; req = skcipher_request_alloc(elephant->tfm, GFP_NOIO); @@ -1552,7 +1552,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc, case -EBUSY: wait_for_completion(&ctx->restart); reinit_completion(&ctx->restart); - /* fall through */ + fallthrough; /* * The request is queued and processed asynchronously, * completion function kcryptd_async_done() will be called. 
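
Note on the dm-crypt hunks above: they replace an on-stack "struct crypto_wait wait;" with "DECLARE_CRYPTO_WAIT(wait);" so that the completion embedded in the wait object is initialized at declaration time. Below is a minimal, self-contained sketch of the pattern these hunks rely on, assuming only the standard crypto wait API (crypto_wait_req()/crypto_req_done() from <linux/crypto.h>); the function name sketch_one_block() and its parameters are illustrative, not part of the patch:

	#include <linux/crypto.h>
	#include <linux/gfp.h>
	#include <linux/scatterlist.h>
	#include <crypto/skcipher.h>

	static int sketch_one_block(struct crypto_skcipher *tfm,
				    struct scatterlist *src,
				    struct scatterlist *dst, u8 *iv)
	{
		/*
		 * DECLARE_CRYPTO_WAIT() initializes the embedded completion
		 * (and error field) at declaration time. A bare
		 * "struct crypto_wait wait;" leaves them as stack garbage,
		 * which is unsafe whenever the request completes
		 * asynchronously and crypto_wait_req() must sleep on it.
		 */
		DECLARE_CRYPTO_WAIT(wait);
		struct skcipher_request *req;
		int err;

		req = skcipher_request_alloc(tfm, GFP_NOIO);
		if (!req)
			return -ENOMEM;

		/* On async completion, crypto_req_done() fills in wait.err
		 * and signals wait.completion. */
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, src, dst,
					   crypto_skcipher_blocksize(tfm), iv);

		/* Returns immediately for synchronous ciphers; sleeps on
		 * wait.completion when the driver returns -EINPROGRESS or
		 * -EBUSY. */
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
		return err;
	}

This mirrors what crypt_iv_eboiv_gen() and crypt_iv_elephant() do. The uninitialized completion only bites on genuinely asynchronous tfms, since crypto_wait_req() never touches it on the synchronous path, which is presumably why the bug could stay hidden behind software cipher implementations.
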
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 8c8d940e532efa6d3074ada12550c65cf8778bb8..3fc3757def55ec29466a0ecddf4c197d6586c6de 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2487,6 +2487,7 @@ static void integrity_recalc(struct work_struct *w) range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); if (unlikely(range.logical_sector >= ic->provided_data_sectors)) { if (ic->mode == 'B') { + block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); DEBUG_print("queue_delayed_work: bitmap_flush_work\n"); queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); } @@ -2564,6 +2565,17 @@ static void integrity_recalc(struct work_struct *w) goto err; } + if (ic->mode == 'B') { + sector_t start, end; + start = (range.logical_sector >> + (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << + (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); + end = ((range.logical_sector + range.n_sectors) >> + (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) << + (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); + block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR); + } + advance_and_next: cond_resched(); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 53645a6f474cb4a344725e0776d42b86e3609113..de4da825ade6083902966590e9e19ec610f548ce 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1287,17 +1287,25 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m) static void flush_multipath_work(struct multipath *m) { if (m->hw_handler_name) { - set_bit(MPATHF_PG_INIT_DISABLED, &m->flags); - smp_mb__after_atomic(); + unsigned long flags; + + if (!atomic_read(&m->pg_init_in_progress)) + goto skip; + + spin_lock_irqsave(&m->lock, flags); + if (atomic_read(&m->pg_init_in_progress) && + !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) { + spin_unlock_irqrestore(&m->lock, flags); - if (atomic_read(&m->pg_init_in_progress)) flush_workqueue(kmpath_handlerd); - multipath_wait_for_pg_init_completion(m); + multipath_wait_for_pg_init_completion(m); - clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags); - smp_mb__after_atomic(); + spin_lock_irqsave(&m->lock, flags); + clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags); + } + spin_unlock_irqrestore(&m->lock, flags); } - +skip: if (m->queue_mode == DM_TYPE_BIO_BASED) flush_work(&m->process_queued_bios); flush_work(&m->trigger_event); @@ -1554,7 +1562,7 @@ static void pg_init_done(void *data, int errors) case SCSI_DH_RETRY: /* Wait before retrying. 
*/ delay_retry = true; - /* fall through */ + fallthrough; case SCSI_DH_IMM_RETRY: case SCSI_DH_RES_TEMP_UNAVAIL: if (pg_init_limit_reached(m, pgpath)) diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 76b6b323bf4bd560f1b83eead235ffcaf373ad86..b461836b6d26387551e01be03273047efe01e679 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -739,12 +739,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f THIN_MAX_CONCURRENT_LOCKS); if (IS_ERR(pmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(pmd->bm); + r = PTR_ERR(pmd->bm); + pmd->bm = NULL; + return r; } r = __open_or_format_metadata(pmd, format_device); - if (r) + if (r) { dm_block_manager_destroy(pmd->bm); + pmd->bm = NULL; + } return r; } @@ -954,7 +958,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd) } pmd_write_lock_in_core(pmd); - if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) { + if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) { r = __commit_transaction(pmd); if (r < 0) DMWARN("%s: __commit_transaction() failed, error = %d", diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 86dbe0c8b45c60314b1d5113b4ab7c38d6b6e788..6271d1e741cf7a478abaf691d7b4f115e2c9a678 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -231,6 +231,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) pfn_t pfn; int id; struct page **pages; + sector_t offset; wc->memory_vmapped = false; @@ -245,9 +246,16 @@ static int persistent_memory_claim(struct dm_writecache *wc) goto err1; } + offset = get_start_sect(wc->ssd_dev->bdev); + if (offset & (PAGE_SIZE / 512 - 1)) { + r = -EINVAL; + goto err1; + } + offset >>= PAGE_SHIFT - 9; + id = dax_read_lock(); - da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn); + da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn); if (da < 0) { wc->memory_map = NULL; r = da; @@ -269,7 +277,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) i = 0; do { long daa; - daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i, + daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i, NULL, &pfn); if (daa <= 0) { r = daa ? 
daa : -EINVAL; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 32fa6499739fb928540898d8ca7e5e41119b7c04..fb0255d25e4b2ea29e05c2727b067237326858ad 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1021,7 +1021,7 @@ static void clone_endio(struct bio *bio) switch (r) { case DM_ENDIO_REQUEUE: error = BLK_STS_DM_REQUEUE; - /*FALLTHRU*/ + fallthrough; case DM_ENDIO_DONE: break; case DM_ENDIO_INCOMPLETE: diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c index 6bbec89976a748d7b57475b05a3b9a95dd180110..2cf973722f59624f478875151169f6bc26f1e58c 100644 --- a/drivers/md/md-autodetect.c +++ b/drivers/md/md-autodetect.c @@ -102,10 +102,10 @@ static int __init md_setup(char *str) pername = "raid0"; break; } - /* FALL THROUGH */ + fallthrough; case 1: /* the first device is numeric */ str = str1; - /* FALL THROUGH */ + fallthrough; case 0: md_setup_args[ent].level = LEVEL_NONE; pername="super-block"; diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index d61b524ae440139c891bd9cc970faa440b5adee1..b10c51988c8ee11aa9e388f3de1bd1d6131a0933 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -1433,7 +1433,7 @@ int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long s case 0: md_bitmap_file_set_bit(bitmap, offset); md_bitmap_count_page(&bitmap->counts, offset, 1); - /* fall through */ + fallthrough; case 1: *bmc = 2; } diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 749ec268d957d60a7ecb25e6372a633f644d10ec..54c089a50b152e4fa42aee04b973d89095a8ae5d 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -493,7 +493,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm, void *p; int r; - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result); @@ -562,7 +562,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, struct buffer_aux *aux; void *p; - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result); @@ -602,7 +602,7 @@ EXPORT_SYMBOL_GPL(dm_bm_unlock); int dm_bm_flush(struct dm_block_manager *bm) { - if (bm->read_only) + if (dm_bm_is_read_only(bm)) return -EPERM; return dm_bufio_write_dirty_buffers(bm->bufio); @@ -616,19 +616,21 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b) bool dm_bm_is_read_only(struct dm_block_manager *bm) { - return bm->read_only; + return (bm ? 
bm->read_only : true); } EXPORT_SYMBOL_GPL(dm_bm_is_read_only); void dm_bm_set_read_only(struct dm_block_manager *bm) { - bm->read_only = true; + if (bm) + bm->read_only = true; } EXPORT_SYMBOL_GPL(dm_bm_set_read_only); void dm_bm_set_read_write(struct dm_block_manager *bm) { - bm->read_only = false; + if (bm) + bm->read_only = false; } EXPORT_SYMBOL_GPL(dm_bm_set_read_write); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ef0fd4830803f00c1142ee7feebb55bf91840b26..225380efd1e24fd63ac71da9ca31d93775d4946a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4083,7 +4083,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, break; } dev = &sh->dev[s->failed_num[0]]; - /* fall through */ + fallthrough; case check_state_compute_result: sh->check_state = check_state_idle; if (!dev) @@ -4214,7 +4214,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, /* we have 2-disk failure */ BUG_ON(s->failed != 2); - /* fall through */ + fallthrough; case check_state_compute_result: sh->check_state = check_state_idle; @@ -6514,9 +6514,12 @@ raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len) /* * The value should not be bigger than PAGE_SIZE. It requires to - * be multiple of DEFAULT_STRIPE_SIZE. + * be multiple of DEFAULT_STRIPE_SIZE and the value should be power + * of two. */ - if (new % DEFAULT_STRIPE_SIZE != 0 || new > PAGE_SIZE || new == 0) + if (new % DEFAULT_STRIPE_SIZE != 0 || + new > PAGE_SIZE || new == 0 || + new != roundup_pow_of_two(new)) return -EINVAL; err = mddev_lock(mddev); diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c index 630a75e0eeb16dab7fcec750408f71a775789690..7607b516a7c438368bf1c6ecfbd844b2d9a024fd 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -210,7 +210,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) tpg->vdownsampling[1] = 1; tpg->hdownsampling[1] = 1; tpg->planes = 2; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_RGB332: case V4L2_PIX_FMT_RGB565: case V4L2_PIX_FMT_RGB565X: @@ -271,7 +271,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) case V4L2_PIX_FMT_YUV420M: case V4L2_PIX_FMT_YVU420M: tpg->buffers = 3; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_YUV420: case V4L2_PIX_FMT_YVU420: tpg->vdownsampling[1] = 2; @@ -284,7 +284,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) case V4L2_PIX_FMT_YUV422M: case V4L2_PIX_FMT_YVU422M: tpg->buffers = 3; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_YUV422P: tpg->vdownsampling[1] = 1; tpg->vdownsampling[2] = 1; @@ -296,7 +296,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) case V4L2_PIX_FMT_NV16M: case V4L2_PIX_FMT_NV61M: tpg->buffers = 2; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: tpg->vdownsampling[1] = 1; @@ -308,7 +308,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) case V4L2_PIX_FMT_NV12M: case V4L2_PIX_FMT_NV21M: tpg->buffers = 2; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: tpg->vdownsampling[1] = 2; @@ -1275,7 +1275,7 @@ static void gen_twopix(struct tpg_data *tpg, case V4L2_PIX_FMT_RGB444: case V4L2_PIX_FMT_XRGB444: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_YUV444: case V4L2_PIX_FMT_ARGB444: buf[0][offset] = (g_u_s << 4) | b_v; @@ -1283,21 +1283,21 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_RGBX444: 
alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_RGBA444: buf[0][offset] = (b_v << 4) | (alpha >> 4); buf[0][offset + 1] = (r_y_h << 4) | g_u_s; break; case V4L2_PIX_FMT_XBGR444: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_ABGR444: buf[0][offset] = (g_u_s << 4) | r_y_h; buf[0][offset + 1] = (alpha & 0xf0) | b_v; break; case V4L2_PIX_FMT_BGRX444: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_BGRA444: buf[0][offset] = (r_y_h << 4) | (alpha >> 4); buf[0][offset + 1] = (b_v << 4) | g_u_s; @@ -1305,7 +1305,7 @@ static void gen_twopix(struct tpg_data *tpg, case V4L2_PIX_FMT_RGB555: case V4L2_PIX_FMT_XRGB555: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_YUV555: case V4L2_PIX_FMT_ARGB555: buf[0][offset] = (g_u_s << 5) | b_v; @@ -1314,7 +1314,7 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_RGBX555: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_RGBA555: buf[0][offset] = (g_u_s << 6) | (b_v << 1) | ((alpha & 0x80) >> 7); @@ -1322,7 +1322,7 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_XBGR555: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_ABGR555: buf[0][offset] = (g_u_s << 5) | r_y_h; buf[0][offset + 1] = (alpha & 0x80) | (b_v << 2) @@ -1330,7 +1330,7 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_BGRX555: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_BGRA555: buf[0][offset] = (g_u_s << 6) | (r_y_h << 1) | ((alpha & 0x80) >> 7); @@ -1339,7 +1339,7 @@ static void gen_twopix(struct tpg_data *tpg, case V4L2_PIX_FMT_RGB555X: case V4L2_PIX_FMT_XRGB555X: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_ARGB555X: buf[0][offset] = (alpha & 0x80) | (r_y_h << 2) | (g_u_s >> 3); buf[0][offset + 1] = (g_u_s << 5) | b_v; @@ -1366,7 +1366,7 @@ static void gen_twopix(struct tpg_data *tpg, case V4L2_PIX_FMT_HSV32: case V4L2_PIX_FMT_XYUV32: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_YUV32: case V4L2_PIX_FMT_ARGB32: case V4L2_PIX_FMT_AYUV32: @@ -1377,7 +1377,7 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_RGBX32: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_RGBA32: buf[0][offset] = r_y_h; buf[0][offset + 1] = g_u_s; @@ -1388,7 +1388,7 @@ static void gen_twopix(struct tpg_data *tpg, case V4L2_PIX_FMT_XBGR32: case V4L2_PIX_FMT_VUYX32: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_ABGR32: case V4L2_PIX_FMT_VUYA32: buf[0][offset] = b_v; @@ -1398,7 +1398,7 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_BGRX32: alpha = 0; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_BGRA32: buf[0][offset] = alpha; buf[0][offset + 1] = b_v; diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index 630509ecee2051831da6011112a23f72b8930849..89620da983bab0fcb0ee8a78f3c0228e1907a3aa 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -546,7 +546,7 @@ static int dvb_net_ule_new_payload(struct dvb_net_ule_handle *h) h->priv->ule_sndu_type_1 = 1; h->ts_remain -= 1; h->from_where += 1; - /* fallthrough */ + fallthrough; case 0: h->new_ts = 1; h->ts += TS_SZ; diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c index e92542b92d3492430d56fdb9c71837a529d25f4f..da0ff7b44da41aed330693d1d947e1e553856ff5 100644 --- a/drivers/media/dvb-frontends/bcm3510.c +++ b/drivers/media/dvb-frontends/bcm3510.c @@ -773,7 
+773,7 @@ static int bcm3510_init(struct dvb_frontend* fe) deb_info("attempting to download firmware\n"); if ((ret = bcm3510_init_cold(st)) < 0) return ret; - /* fall-through */ + fallthrough; case JDEC_EEPROM_LOAD_WAIT: deb_info("firmware is loaded\n"); bcm3510_check_firmware_version(st); diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c index bc374750529b72f13c70fa8bfcf44175d1282cf4..08a85831e917f301a762fdc4012d951b68ca3c62 100644 --- a/drivers/media/dvb-frontends/dib0090.c +++ b/drivers/media/dvb-frontends/dib0090.c @@ -1693,7 +1693,7 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front if (state->identity.p1g) state->dc = dc_p1g_table; - /* fall through */ + fallthrough; case CT_TUNER_STEP_0: dprintk("Start/continue DC calibration for %s path\n", (state->dc->i == 1) ? "I" : "Q"); diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c index 0f0480d8576d77264fc8815d4df888972b790c57..a6c2fc4586eb36bda9beea1736960c3297f03473 100644 --- a/drivers/media/dvb-frontends/dib3000mb.c +++ b/drivers/media/dvb-frontends/dib3000mb.c @@ -224,7 +224,7 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner) switch (c->hierarchy) { case HIERARCHY_NONE: deb_setf("hierarchy: none\n"); - /* fall through */ + fallthrough; case HIERARCHY_1: deb_setf("hierarchy: alpha=1\n"); wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_1); diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c index 0a7790c4bad39ce5ac5ebfcff9d3a419f57da8cb..55bee50aa8716e2dccbe5225ca4efde45eefce0a 100644 --- a/drivers/media/dvb-frontends/dib7000p.c +++ b/drivers/media/dvb-frontends/dib7000p.c @@ -276,7 +276,7 @@ static int dib7000p_set_power_mode(struct dib7000p_state *state, enum dib7000p_p if (state->version != SOC7090) reg_1280 &= ~((1 << 11)); reg_1280 &= ~(1 << 6); - /* fall-through */ + fallthrough; case DIB7000P_POWER_INTERFACE_ONLY: /* just leave power on the control-interfaces: GPIO and (I2C or SDIO) */ /* TODO power up either SDIO or I2C */ diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c index 5de016412c42990578b6d1b5b7a911761a37e038..237b9d04c07661ad12fd148fd71555e9b95ec876 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drxj.c +++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c @@ -2306,7 +2306,7 @@ hi_command(struct i2c_device_addr *dev_addr, const struct drxj_hi_cmd *cmd, u16 pr_err("error %d\n", rc); goto rw_error; } - /* fallthrough */ + fallthrough; case SIO_HI_RA_RAM_CMD_BRDCTRL: rc = drxj_dap_write_reg16(dev_addr, SIO_HI_RA_RAM_PAR_2__A, cmd->param2, 0); if (rc != 0) { @@ -2318,7 +2318,7 @@ hi_command(struct i2c_device_addr *dev_addr, const struct drxj_hi_cmd *cmd, u16 pr_err("error %d\n", rc); goto rw_error; } - /* fallthrough */ + fallthrough; case SIO_HI_RA_RAM_CMD_NULL: /* No parameters */ break; @@ -2841,7 +2841,7 @@ ctrl_set_cfg_mpeg_output(struct drx_demod_instance *demod, struct drx_cfg_mpeg_o /* coef = 188/204 */ max_bit_rate = (ext_attr->curr_symbol_rate / 8) * nr_bits * 188; - /* fall-through - as b/c Annex A/C need following settings */ + fallthrough; /* as b/c Annex A/C need following settings */ case DRX_STANDARD_ITU_B: rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_USAGE__A, FEC_OC_FCT_USAGE__PRE, 0); if (rc != 0) { @@ -3555,8 +3555,8 @@ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg if (!ext_attr->has_smatx) return -EIO; switch (uio_cfg->mode) { - case 
DRX_UIO_MODE_FIRMWARE_SMA: /* fall through */ - case DRX_UIO_MODE_FIRMWARE_SAW: /* fall through */ + case DRX_UIO_MODE_FIRMWARE_SMA: + case DRX_UIO_MODE_FIRMWARE_SAW: case DRX_UIO_MODE_READWRITE: ext_attr->uio_sma_tx_mode = uio_cfg->mode; break; @@ -3579,7 +3579,7 @@ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg if (!ext_attr->has_smarx) return -EIO; switch (uio_cfg->mode) { - case DRX_UIO_MODE_FIRMWARE0: /* fall through */ + case DRX_UIO_MODE_FIRMWARE0: case DRX_UIO_MODE_READWRITE: ext_attr->uio_sma_rx_mode = uio_cfg->mode; break; @@ -3603,7 +3603,7 @@ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg if (!ext_attr->has_gpio) return -EIO; switch (uio_cfg->mode) { - case DRX_UIO_MODE_FIRMWARE0: /* fall through */ + case DRX_UIO_MODE_FIRMWARE0: case DRX_UIO_MODE_READWRITE: ext_attr->uio_gpio_mode = uio_cfg->mode; break; @@ -3639,7 +3639,7 @@ static int ctrl_set_uio_cfg(struct drx_demod_instance *demod, struct drxuio_cfg } ext_attr->uio_irqn_mode = uio_cfg->mode; break; - case DRX_UIO_MODE_FIRMWARE0: /* fall through */ + case DRX_UIO_MODE_FIRMWARE0: default: return -EINVAL; break; @@ -4004,31 +4004,36 @@ static int scu_command(struct i2c_device_addr *dev_addr, struct drxjscu_cmd *cmd if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 4: rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_3__A, *(cmd->parameter + 3), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 3: rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_2__A, *(cmd->parameter + 2), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 2: rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_1__A, *(cmd->parameter + 1), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 1: rc = drxj_dap_write_reg16(dev_addr, SCU_RAM_PARAM_0__A, *(cmd->parameter + 0), 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 0: /* do nothing */ break; @@ -4068,25 +4073,29 @@ static int scu_command(struct i2c_device_addr *dev_addr, struct drxjscu_cmd *cmd if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 3: rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_2__A, cmd->result + 2, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 2: rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_1__A, cmd->result + 1, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 1: rc = drxj_dap_read_reg16(dev_addr, SCU_RAM_PARAM_0__A, cmd->result + 0, 0); if (rc != 0) { pr_err("error %d\n", rc); goto rw_error; - } /* fallthrough */ + } + fallthrough; case 0: /* do nothing */ break; @@ -4791,7 +4800,7 @@ set_frequency(struct drx_demod_instance *demod, Sound carrier is already 3Mhz above centre frequency due to tuner setting so now add an extra shift of 1MHz... 
*/ fm_frequency_shift = 1000; - /*fall through */ + fallthrough; case DRX_STANDARD_ITU_B: case DRX_STANDARD_NTSC: case DRX_STANDARD_PAL_SECAM_BG: @@ -10475,11 +10484,11 @@ ctrl_set_channel(struct drx_demod_instance *demod, struct drx_channel *channel) (standard == DRX_STANDARD_NTSC)) { switch (channel->bandwidth) { case DRX_BANDWIDTH_6MHZ: - case DRX_BANDWIDTH_UNKNOWN: /* fall through */ + case DRX_BANDWIDTH_UNKNOWN: channel->bandwidth = DRX_BANDWIDTH_6MHZ; break; - case DRX_BANDWIDTH_8MHZ: /* fall through */ - case DRX_BANDWIDTH_7MHZ: /* fall through */ + case DRX_BANDWIDTH_8MHZ: + case DRX_BANDWIDTH_7MHZ: default: return -EINVAL; } @@ -10511,10 +10520,10 @@ ctrl_set_channel(struct drx_demod_instance *demod, struct drx_channel *channel) } switch (channel->constellation) { - case DRX_CONSTELLATION_QAM16: /* fall through */ - case DRX_CONSTELLATION_QAM32: /* fall through */ - case DRX_CONSTELLATION_QAM64: /* fall through */ - case DRX_CONSTELLATION_QAM128: /* fall through */ + case DRX_CONSTELLATION_QAM16: + case DRX_CONSTELLATION_QAM32: + case DRX_CONSTELLATION_QAM64: + case DRX_CONSTELLATION_QAM128: case DRX_CONSTELLATION_QAM256: bandwidth_temp = channel->symbolrate * bw_rolloff_factor; bandwidth = bandwidth_temp / 100; @@ -10628,8 +10637,8 @@ ctrl_set_channel(struct drx_demod_instance *demod, struct drx_channel *channel) } break; #ifndef DRXJ_VSB_ONLY - case DRX_STANDARD_ITU_A: /* fallthrough */ - case DRX_STANDARD_ITU_B: /* fallthrough */ + case DRX_STANDARD_ITU_A: + case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: rc = set_qam_channel(demod, channel, tuner_freq_offset); if (rc != 0) { @@ -10820,7 +10829,7 @@ ctrl_lock_status(struct drx_demod_instance *demod, enum drx_lock_status *lock_st SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK; break; #endif - case DRX_STANDARD_UNKNOWN: /* fallthrough */ + case DRX_STANDARD_UNKNOWN: default: return -EIO; } @@ -10888,8 +10897,8 @@ ctrl_set_standard(struct drx_demod_instance *demod, enum drx_standard *standard) */ switch (prev_standard) { #ifndef DRXJ_VSB_ONLY - case DRX_STANDARD_ITU_A: /* fallthrough */ - case DRX_STANDARD_ITU_B: /* fallthrough */ + case DRX_STANDARD_ITU_A: + case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: rc = power_down_qam(demod, false); if (rc != 0) { @@ -10908,7 +10917,7 @@ ctrl_set_standard(struct drx_demod_instance *demod, enum drx_standard *standard) case DRX_STANDARD_UNKNOWN: /* Do nothing */ break; - case DRX_STANDARD_AUTO: /* fallthrough */ + case DRX_STANDARD_AUTO: default: return -EINVAL; } @@ -10921,8 +10930,8 @@ ctrl_set_standard(struct drx_demod_instance *demod, enum drx_standard *standard) switch (*standard) { #ifndef DRXJ_VSB_ONLY - case DRX_STANDARD_ITU_A: /* fallthrough */ - case DRX_STANDARD_ITU_B: /* fallthrough */ + case DRX_STANDARD_ITU_A: + case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: do { u16 dummy; @@ -11111,12 +11120,12 @@ ctrl_power_mode(struct drx_demod_instance *demod, enum drx_power_mode *mode) goto rw_error; } break; - case DRX_STANDARD_PAL_SECAM_BG: /* fallthrough */ - case DRX_STANDARD_PAL_SECAM_DK: /* fallthrough */ - case DRX_STANDARD_PAL_SECAM_I: /* fallthrough */ - case DRX_STANDARD_PAL_SECAM_L: /* fallthrough */ - case DRX_STANDARD_PAL_SECAM_LP: /* fallthrough */ - case DRX_STANDARD_NTSC: /* fallthrough */ + case DRX_STANDARD_PAL_SECAM_BG: + case DRX_STANDARD_PAL_SECAM_DK: + case DRX_STANDARD_PAL_SECAM_I: + case DRX_STANDARD_PAL_SECAM_L: + case DRX_STANDARD_PAL_SECAM_LP: + case DRX_STANDARD_NTSC: case DRX_STANDARD_FM: rc = power_down_atv(demod, ext_attr->standard, true); if (rc != 0) { @@ 
-11127,7 +11136,7 @@ ctrl_power_mode(struct drx_demod_instance *demod, enum drx_power_mode *mode) case DRX_STANDARD_UNKNOWN: /* Do nothing */ break; - case DRX_STANDARD_AUTO: /* fallthrough */ + case DRX_STANDARD_AUTO: default: return -EIO; } @@ -11220,8 +11229,8 @@ ctrl_set_cfg_pre_saw(struct drx_demod_instance *demod, struct drxj_cfg_pre_saw * ext_attr->vsb_pre_saw_cfg = *pre_saw; break; #ifndef DRXJ_VSB_ONLY - case DRX_STANDARD_ITU_A: /* fallthrough */ - case DRX_STANDARD_ITU_B: /* fallthrough */ + case DRX_STANDARD_ITU_A: + case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: ext_attr->qam_pre_saw_cfg = *pre_saw; break; @@ -11264,10 +11273,10 @@ ctrl_set_cfg_afe_gain(struct drx_demod_instance *demod, struct drxj_cfg_afe_gain ext_attr = (struct drxj_data *) demod->my_ext_attr; switch (afe_gain->standard) { - case DRX_STANDARD_8VSB: /* fallthrough */ + case DRX_STANDARD_8VSB: fallthrough; #ifndef DRXJ_VSB_ONLY - case DRX_STANDARD_ITU_A: /* fallthrough */ - case DRX_STANDARD_ITU_B: /* fallthrough */ + case DRX_STANDARD_ITU_A: + case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: #endif /* Do nothing */ @@ -11301,8 +11310,8 @@ ctrl_set_cfg_afe_gain(struct drx_demod_instance *demod, struct drxj_cfg_afe_gain ext_attr->vsb_pga_cfg = gain * 13 + 140; break; #ifndef DRXJ_VSB_ONLY - case DRX_STANDARD_ITU_A: /* fallthrough */ - case DRX_STANDARD_ITU_B: /* fallthrough */ + case DRX_STANDARD_ITU_A: + case DRX_STANDARD_ITU_B: case DRX_STANDARD_ITU_C: ext_attr->qam_pga_cfg = gain * 13 + 140; break; diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c index fae6f3763364aac9cf7983d43bf0a6e11ab4fe1b..45f9828639042c04b5e9a7cd2fabbc9e030f184c 100644 --- a/drivers/media/dvb-frontends/drxd_hard.c +++ b/drivers/media/dvb-frontends/drxd_hard.c @@ -1512,14 +1512,14 @@ static int SetDeviceTypeId(struct drxd_state *state) switch (deviceId) { case 4: state->diversity = 1; - /* fall through */ + fallthrough; case 3: case 7: state->PGA = 1; break; case 6: state->diversity = 1; - /* fall through */ + fallthrough; case 5: case 8: break; @@ -1966,7 +1966,7 @@ static int DRX_Start(struct drxd_state *state, s32 off) switch (p->transmission_mode) { default: /* Not set, detect it automatically */ operationMode |= SC_RA_RAM_OP_AUTO_MODE__M; - /* fall through - try first guess DRX_FFTMODE_8K */ + fallthrough; /* try first guess DRX_FFTMODE_8K */ case TRANSMISSION_MODE_8K: transmissionParams |= SC_RA_RAM_OP_PARAM_MODE_8K; if (state->type_A) { @@ -2139,7 +2139,7 @@ static int DRX_Start(struct drxd_state *state, s32 off) switch (p->modulation) { default: operationMode |= SC_RA_RAM_OP_AUTO_CONST__M; - /* fall through - try first guess DRX_CONSTELLATION_QAM64 */ + fallthrough; /* try first guess DRX_CONSTELLATION_QAM64 */ case QAM_64: transmissionParams |= SC_RA_RAM_OP_PARAM_CONST_QAM64; if (state->type_A) { @@ -2266,7 +2266,7 @@ static int DRX_Start(struct drxd_state *state, s32 off) break; default: operationMode |= SC_RA_RAM_OP_AUTO_RATE__M; - /* fall through */ + fallthrough; case FEC_2_3: transmissionParams |= SC_RA_RAM_OP_PARAM_RATE_2_3; if (state->type_A) @@ -2301,7 +2301,7 @@ static int DRX_Start(struct drxd_state *state, s32 off) switch (p->bandwidth_hz) { case 0: p->bandwidth_hz = 8000000; - /* fall through */ + fallthrough; case 8000000: /* (64/7)*(8/8)*1000000 */ bandwidth = DRXD_BANDWIDTH_8MHZ_IN_HZ; diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index 0ae9d8c72d8dcaa26de2cbb615198e836bcee590..32f9346deb3e98fbe22a7afc63a4c1667344dfed 
100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -1756,7 +1756,7 @@ static int setoperation_mode(struct drxk_state *state, goto error; state->m_operation_mode = OM_NONE; break; - case OM_QAM_ITU_A: /* fallthrough */ + case OM_QAM_ITU_A: case OM_QAM_ITU_C: status = mpegts_stop(state); if (status < 0) @@ -1783,7 +1783,7 @@ static int setoperation_mode(struct drxk_state *state, if (status < 0) goto error; break; - case OM_QAM_ITU_A: /* fallthrough */ + case OM_QAM_ITU_A: case OM_QAM_ITU_C: dprintk(1, ": DVB-C Annex %c\n", (state->m_operation_mode == OM_QAM_ITU_A) ? 'A' : 'C'); @@ -2012,7 +2012,7 @@ static int mpegts_dto_setup(struct drxk_state *state, fec_oc_rcn_ctl_rate = 0xC00000; static_clk = state->m_dvbt_static_clk; break; - case OM_QAM_ITU_A: /* fallthrough */ + case OM_QAM_ITU_A: case OM_QAM_ITU_C: fec_oc_tmd_mode = 0x0004; fec_oc_rcn_ctl_rate = 0xD2B4EE; /* good for >63 Mb/s */ @@ -3249,11 +3249,11 @@ static int dvbt_sc_command(struct drxk_state *state, case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM: case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM: status |= write16(state, OFDM_SC_RA_RAM_PARAM1__A, param1); - /* fall through - All commands using 1 parameters */ + fallthrough; /* All commands using 1 parameters */ case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING: case OFDM_SC_RA_RAM_CMD_USER_IO: status |= write16(state, OFDM_SC_RA_RAM_PARAM0__A, param0); - /* fall through - All commands using 0 parameters */ + fallthrough; /* All commands using 0 parameters */ case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM: case OFDM_SC_RA_RAM_CMD_NULL: /* Write command */ @@ -3761,7 +3761,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, case TRANSMISSION_MODE_AUTO: default: operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M; - /* fall through - try first guess DRX_FFTMODE_8K */ + fallthrough; /* try first guess DRX_FFTMODE_8K */ case TRANSMISSION_MODE_8K: transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K; break; @@ -3775,7 +3775,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, default: case GUARD_INTERVAL_AUTO: operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M; - /* fall through - try first guess DRX_GUARD_1DIV4 */ + fallthrough; /* try first guess DRX_GUARD_1DIV4 */ case GUARD_INTERVAL_1_4: transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4; break; @@ -3798,7 +3798,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_HIER__M; /* try first guess SC_RA_RAM_OP_PARAM_HIER_NO */ /* transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_NO; */ - /* fall through */ + fallthrough; case HIERARCHY_1: transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A1; break; @@ -3816,7 +3816,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, case QAM_AUTO: default: operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M; - /* fall through - try first guess DRX_CONSTELLATION_QAM64 */ + fallthrough; /* try first guess DRX_CONSTELLATION_QAM64 */ case QAM_64: transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64; break; @@ -3841,7 +3841,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, WR16(dev_addr, OFDM_EC_SB_PRIOR__A, OFDM_EC_SB_PRIOR_HI)); break; - case DRX_PRIORITY_UNKNOWN: /* fall through */ + case DRX_PRIORITY_UNKNOWN: default: status = -EINVAL; goto error; @@ -3859,7 +3859,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, case FEC_AUTO: default: operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M; - 
/* fall through - try first guess DRX_CODERATE_2DIV3 */ + fallthrough; /* try first guess DRX_CODERATE_2DIV3 */ case FEC_2_3: transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3; break; @@ -3893,7 +3893,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, switch (state->props.bandwidth_hz) { case 0: state->props.bandwidth_hz = 8000000; - /* fall through */ + fallthrough; case 8000000: bandwidth = DRXK_BANDWIDTH_8MHZ_IN_HZ; status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c index d3c330e035c45e42a2e84644d6152413313c2022..722576f1732aa733371aee191986995c52be01b6 100644 --- a/drivers/media/dvb-frontends/lgdt3306a.c +++ b/drivers/media/dvb-frontends/lgdt3306a.c @@ -768,7 +768,7 @@ static int lgdt3306a_set_if(struct lgdt3306a_state *state, default: pr_warn("IF=%d KHz is not supported, 3250 assumed\n", if_freq_khz); - /* fallthrough */ + fallthrough; case 3250: /* 3.25Mhz */ nco1 = 0x34; nco2 = 0x00; diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c index 881897583cf2d88fc6482df6a7158a0f424cbc54..399d5c519027e845b0f757d18dbf00e24b0d1f45 100644 --- a/drivers/media/dvb-frontends/mt352.c +++ b/drivers/media/dvb-frontends/mt352.c @@ -201,7 +201,7 @@ static int mt352_set_parameters(struct dvb_frontend *fe) if (op->hierarchy == HIERARCHY_AUTO || op->hierarchy == HIERARCHY_NONE) break; - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c index 290b9eab099ff756227753f3bc220fb146118f09..4404ace82981ce46b07ba9f15885e78db8ae227b 100644 --- a/drivers/media/dvb-frontends/mxl5xx.c +++ b/drivers/media/dvb-frontends/mxl5xx.c @@ -739,7 +739,7 @@ static int get_frontend(struct dvb_frontend *fe, default: break; } - /* Fall through */ + fallthrough; case SYS_DVBS: switch ((enum MXL_HYDRA_MODULATION_E) reg_data[DMD_MODULATION_SCHEME_ADDR]) { diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c index 35a3e47497c272c78188149c623c449b3d78e601..24de1b115158387e298a307ec52b1b5a0c461c95 100644 --- a/drivers/media/dvb-frontends/or51132.c +++ b/drivers/media/dvb-frontends/or51132.c @@ -482,7 +482,7 @@ static int or51132_read_snr(struct dvb_frontend* fe, u16* snr) switch (reg&0xff) { case 0x06: if (reg & 0x1000) usK = 3 << 24; - /* fall through */ + fallthrough; case 0x43: /* QAM64 */ c = 150204167; break; diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c index 89402916d301f4604c1490ba24275de91f8363b2..c1334d7eb442026ba04b8e45360be262c0c0a67f 100644 --- a/drivers/media/dvb-frontends/s5h1411.c +++ b/drivers/media/dvb-frontends/s5h1411.c @@ -398,7 +398,7 @@ static int s5h1411_set_if_freq(struct dvb_frontend *fe, int KHz) default: dprintk("%s(%d KHz) Invalid, defaulting to 5380\n", __func__, KHz); - /* fall through */ + fallthrough; case 5380: case 44000: s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1be4); diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c index 2fc6aea580f90be3b587290287c35535aa01423d..2a2cf20a73d615093b96453f411df106df5f5616 100644 --- a/drivers/media/dvb-frontends/zl10353.c +++ b/drivers/media/dvb-frontends/zl10353.c @@ -201,7 +201,7 @@ static int zl10353_set_parameters(struct dvb_frontend *fe) break; default: c->bandwidth_hz = 8000000; - /* fall through */ + fallthrough; case 8000000: zl10353_single_write(fe, 
MCLK_RATIO, 0x75); zl10353_single_write(fe, 0x64, 0x36); @@ -258,7 +258,7 @@ static int zl10353_set_parameters(struct dvb_frontend *fe) if (c->hierarchy == HIERARCHY_AUTO || c->hierarchy == HIERARCHY_NONE) break; - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 48ae60a2c6030a049d6499dfbcedbeff43801f86..c7ba76fee599a91c4d7056546523b3b8d93dce9d 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -467,7 +467,7 @@ config VIDEO_VPX3220 config VIDEO_MAX9286 tristate "Maxim MAX9286 GMSL deserializer support" depends on I2C && I2C_MUX - depends on OF + depends on OF_GPIO select V4L2_FWNODE select VIDEO_V4L2_SUBDEV_API select MEDIA_CONTROLLER @@ -741,7 +741,7 @@ config VIDEO_HI556 config VIDEO_IMX214 tristate "Sony IMX214 sensor support" depends on GPIOLIB && I2C && VIDEO_V4L2 - depends on V4L2_FWNODE + select V4L2_FWNODE select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API select REGMAP_I2C diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c index 570a4a09c387a553204ac54697c7bd0bb9c07110..03eee606af91528ca056e18ddce527c0a7c99a86 100644 --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ b/drivers/media/pci/cx23885/cx23885-cards.c @@ -2209,7 +2209,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; - /* fall-through */ + fallthrough; case CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP: ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */ @@ -2370,7 +2370,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) /* Currently only enabled for the integrated IR controller */ if (!enable_885_ir) break; - /* fall-through */ + fallthrough; case CX23885_BOARD_HAUPPAUGE_HVR1250: case CX23885_BOARD_HAUPPAUGE_HVR1800: case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE: diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c index 7cabb9e9ffe21f53ffeb597d4ff415f01ea26110..92fe051c672f603606e4d9abcc2d451683d0cbdc 100644 --- a/drivers/media/pci/ddbridge/ddbridge-core.c +++ b/drivers/media/pci/ddbridge/ddbridge-core.c @@ -1310,7 +1310,7 @@ static void dvb_input_detach(struct ddb_input *input) dvb_unregister_frontend(dvb->fe2); if (dvb->fe) dvb_unregister_frontend(dvb->fe); - /* fallthrough */ + fallthrough; case 0x30: dvb_module_release(dvb->i2c_client[0]); dvb->i2c_client[0] = NULL; @@ -1321,22 +1321,22 @@ static void dvb_input_detach(struct ddb_input *input) dvb_frontend_detach(dvb->fe); dvb->fe = NULL; dvb->fe2 = NULL; - /* fallthrough */ + fallthrough; case 0x20: dvb_net_release(&dvb->dvbnet); - /* fallthrough */ + fallthrough; case 0x12: dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &dvb->hw_frontend); dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &dvb->mem_frontend); - /* fallthrough */ + fallthrough; case 0x11: dvb_dmxdev_release(&dvb->dmxdev); - /* fallthrough */ + fallthrough; case 0x10: dvb_dmx_release(&dvb->demux); - /* fallthrough */ + fallthrough; case 0x01: break; } @@ -1559,7 +1559,7 @@ static int dvb_input_attach(struct ddb_input *input) osc24 = 0; else osc24 = 1; - /* fall-through */ + fallthrough; case DDB_TUNER_DVBCT2_SONY_P: case DDB_TUNER_DVBC2T2_SONY_P: case DDB_TUNER_ISDBT_SONY_P: @@ -1575,7 +1575,7 @@ static int dvb_input_attach(struct ddb_input *input) break; case DDB_TUNER_DVBC2T2I_SONY: osc24 = 1; - 
/* fall-through */ + fallthrough; case DDB_TUNER_DVBCT2_SONY: case DDB_TUNER_DVBC2T2_SONY: case DDB_TUNER_ISDBT_SONY: @@ -2036,7 +2036,7 @@ static int ddb_port_attach(struct ddb_port *port) ret = ddb_ci_attach(port, ci_bitrate); if (ret < 0) break; - /* fall-through */ + fallthrough; case DDB_PORT_LOOP: ret = dvb_register_device(port->dvb[0].adap, &port->dvb[0].dev, @@ -2432,7 +2432,8 @@ void ddb_ports_init(struct ddb *dev) ddb_input_init(port, 4 + i, 1, 4 + i); ddb_output_init(port, i); break; - } /* fallthrough */ + } + fallthrough; case DDB_OCTOPUS: ddb_input_init(port, 2 * i, 0, 2 * i); ddb_input_init(port, 2 * i + 1, 1, 2 * i + 1); @@ -3417,7 +3418,7 @@ int ddb_exit_ddbridge(int stage, int error) default: case 2: destroy_workqueue(ddb_wq); - /* fall-through */ + fallthrough; case 1: ddb_class_destroy(); break; diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c index 7fb3b1853b87211fcc8cda5d5e03c69435cb4d7d..8944e4bd4638261904f972fc719cf845de4e2187 100644 --- a/drivers/media/pci/meye/meye.c +++ b/drivers/media/pci/meye/meye.c @@ -952,7 +952,7 @@ static int meyeioc_sync(struct file *file, void *fh, int *i) mutex_unlock(&meye.lock); return -EINTR; } - /* fall through */ + fallthrough; case MEYE_BUF_DONE: meye.grab_buffer[*i].state = MEYE_BUF_UNUSED; if (kfifo_out_locked(&meye.doneq, (unsigned char *)&unused, diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index bf36b1e22b6357d8dc639c4e5acb9a0bddca44fd..45228f4f6fc6d9e39fe0bb5abbd03f296ff1a1bd 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c @@ -637,7 +637,7 @@ static void gpioirq(unsigned long cookie) iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2); break; } - /* fall through */ + fallthrough; case DATA_TS_RECORD: case DATA_PES_RECORD: @@ -2176,7 +2176,7 @@ static int frontend_init(struct av7110 *av7110) break; } } - /* fall-thru */ + fallthrough; case 0x0008: // Hauppauge/TT DVB-T // Grundig 29504-401 diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c index e8a8ec5405e2d2deead16144ef1512ae81ecb162..93ca31e38ddd3949169ee0b1f11eb41ae2f58c9c 100644 --- a/drivers/media/pci/ttpci/av7110_hw.c +++ b/drivers/media/pci/ttpci/av7110_hw.c @@ -1107,7 +1107,7 @@ int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc) break; case OSD_SetRow: dc->y1 = dc->y0; - /* fall through */ + fallthrough; case OSD_SetBlock: ret = OSDSetBlock(av7110, dc->x0, dc->y0, dc->x1, dc->y1, dc->color, dc->data); break; diff --git a/drivers/media/pci/ttpci/av7110_ipack.c b/drivers/media/pci/ttpci/av7110_ipack.c index ec528fae7333bd6943a3a517d9e0e8b68fb21770..30330ed01ce8852bd996ffab1322c9cf733a8dc4 100644 --- a/drivers/media/pci/ttpci/av7110_ipack.c +++ b/drivers/media/pci/ttpci/av7110_ipack.c @@ -182,7 +182,7 @@ int av7110_ipack_instant_repack (const u8 *buf, int count, struct ipack *p) case DSM_CC_STREAM : case ISO13522_STREAM: p->done = 1; - /* fall through */ + fallthrough; case PRIVATE_STREAM1: case VIDEO_STREAM_S ... VIDEO_STREAM_E: case AUDIO_STREAM_S ... 
AUDIO_STREAM_E: diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c index 38cac508bd728d2012bc3503b3252f49debc9e69..3cb83005cf09b6c186a081b144ceb2e3657cfab8 100644 --- a/drivers/media/pci/ttpci/budget-av.c +++ b/drivers/media/pci/ttpci/budget-av.c @@ -1226,7 +1226,7 @@ static void frontend_init(struct budget_av *budget_av) * but so far it has been only confirmed for this type */ budget_av->reinitialise_demod = 1; - /* fall through */ + fallthrough; case SUBID_DVBS_KNC1_PLUS: case SUBID_DVBS_EASYWATCH_1: if (saa->pci->subsystem_vendor == 0x1894) { diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c index 9c811272abfe73309078c661c88d364522c58fa4..a88711a3ac7fd1ee585ad390cd844d540219f95e 100644 --- a/drivers/media/pci/ttpci/budget.c +++ b/drivers/media/pci/ttpci/budget.c @@ -613,7 +613,7 @@ static void frontend_init(struct budget *budget) break; } } - /* fall through */ + fallthrough; case 0x1018: // TT Budget-S-1401 (philips tda10086/philips tda8262) { struct dvb_frontend *fe; @@ -638,7 +638,7 @@ static void frontend_init(struct budget *budget) break; } } - /* fall through */ + fallthrough; case 0x101c: { /* TT S2-1600 */ const struct stv6110x_devctl *ctl; diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c index 36e5f2ff4ef1d213e8c8a16e57f36125eb7e423c..b22dc1d725276f61bf2c47dfff21b00d93d54b3f 100644 --- a/drivers/media/platform/sh_vou.c +++ b/drivers/media/platform/sh_vou.c @@ -220,7 +220,7 @@ static void sh_vou_stream_config(struct sh_vou_device *vou_dev) break; case V4L2_PIX_FMT_RGB565: dataswap ^= 1; - /* fall through */ + fallthrough; case V4L2_PIX_FMT_RGB565X: row_coeff = 2; break; @@ -802,7 +802,7 @@ static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt) default: pr_warn("%s(): Invalid bus-format code %d, using default 8-bit\n", __func__, bus_fmt); - /* fall through */ + fallthrough; case SH_VOU_BUS_8BIT: return 1; case SH_VOU_BUS_16BIT: diff --git a/drivers/media/platform/ti-vpe/cal.h b/drivers/media/platform/ti-vpe/cal.h index e496083715d213e3aa300f07e6460bf3a5325297..4123405ee0cf765221d4f70cdea9672f85438b99 100644 --- a/drivers/media/platform/ti-vpe/cal.h +++ b/drivers/media/platform/ti-vpe/cal.h @@ -226,7 +226,7 @@ static inline void cal_write_field(struct cal_dev *cal, u32 offset, u32 value, u32 val = cal_read(cal, offset); val &= ~mask; - val |= FIELD_PREP(mask, value); + val |= (value << __ffs(mask)) & mask; cal_write(cal, offset, val); } diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index b203296de9779d57ab2c2bef5c4d34c734c883ca..7e246026388291659e4914ad2ac447a3d4765f4b 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -105,7 +105,8 @@ static inline enum phase_diversity_modes_idx si476x_phase_diversity_mode_to_idx(enum si476x_phase_diversity_mode mode) { switch (mode) { - default: /* FALLTHROUGH */ + default: + fallthrough; case SI476X_PHDIV_DISABLED: return SI476X_IDX_PHDIV_DISABLED; case SI476X_PHDIV_PRIMARY_COMBINING: diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c index b0303cf0038798b3fd3c57bb0e7ebe3fc06de1ce..c37315226c427ac3e382055e0d3f8057d8ef6870 100644 --- a/drivers/media/radio/tea575x.c +++ b/drivers/media/radio/tea575x.c @@ -249,7 +249,7 @@ int snd_tea575x_enum_freq_bands(struct snd_tea575x *tea, index = BAND_AM; break; } - /* Fall through */ + fallthrough; default: return -EINVAL; } diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index 
5bb144435c1605a86e484f692b170dddd15b81a6..3fe3edd8087656013846c33d3ac690d4700f542f 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -112,7 +112,7 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_trace_printk: if (perfmon_capable()) return bpf_get_trace_printk_proto(); - /* fall through */ + fallthrough; default: return NULL; } diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c index f33b443bfa47b06fe103620fc71aecef3342e8a3..c6cd2e6d8e654d8cb5182f1bb7fc3436d7cdecf5 100644 --- a/drivers/media/rc/gpio-ir-tx.c +++ b/drivers/media/rc/gpio-ir-tx.c @@ -19,8 +19,6 @@ struct gpio_ir { struct gpio_desc *gpio; unsigned int carrier; unsigned int duty_cycle; - /* we need a spinlock to hold the cpu while transmitting */ - spinlock_t lock; }; static const struct of_device_id gpio_ir_tx_of_match[] = { @@ -53,12 +51,11 @@ static int gpio_ir_tx_set_carrier(struct rc_dev *dev, u32 carrier) static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf, uint count) { - unsigned long flags; ktime_t edge; s32 delta; int i; - spin_lock_irqsave(&gpio_ir->lock, flags); + local_irq_disable(); edge = ktime_get(); @@ -72,14 +69,11 @@ static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf, } gpiod_set_value(gpio_ir->gpio, 0); - - spin_unlock_irqrestore(&gpio_ir->lock, flags); } static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf, uint count) { - unsigned long flags; ktime_t edge; /* * delta should never exceed 0.5 seconds (IR_MAX_DURATION) and on @@ -95,7 +89,7 @@ static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf, space = DIV_ROUND_CLOSEST((100 - gpio_ir->duty_cycle) * (NSEC_PER_SEC / 100), gpio_ir->carrier); - spin_lock_irqsave(&gpio_ir->lock, flags); + local_irq_disable(); edge = ktime_get(); @@ -128,19 +122,20 @@ static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf, edge = last; } } - - spin_unlock_irqrestore(&gpio_ir->lock, flags); } static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf, unsigned int count) { struct gpio_ir *gpio_ir = dev->priv; + unsigned long flags; + local_irq_save(flags); if (gpio_ir->carrier) gpio_ir_tx_modulated(gpio_ir, txbuf, count); else gpio_ir_tx_unmodulated(gpio_ir, txbuf, count); + local_irq_restore(flags); return count; } @@ -176,7 +171,6 @@ static int gpio_ir_tx_probe(struct platform_device *pdev) gpio_ir->carrier = 38000; gpio_ir->duty_cycle = 50; - spin_lock_init(&gpio_ir->lock); rc = devm_rc_register_device(&pdev->dev, rcdev); if (rc < 0) diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c index 95727ca910f715c7918de70a9785988002989dc1..0cda78f72fd800552149549e3b92be33286de909 100644 --- a/drivers/media/rc/ir-rc6-decoder.c +++ b/drivers/media/rc/ir-rc6-decoder.c @@ -64,7 +64,7 @@ static enum rc6_mode rc6_mode(struct rc6_dec *data) case 6: if (!data->toggle) return RC6_MODE_6A; - /* fall through */ + fallthrough; default: return RC6_MODE_UNKNOWN; } diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c index 9fa58d92eb09723ee0196aa883e0d4f02479844f..7d9a7c000c75aa7f839ee675364815d69df44aef 100644 --- a/drivers/media/rc/ir-sony-decoder.c +++ b/drivers/media/rc/ir-sony-decoder.c @@ -102,7 +102,7 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev) } data->state = STATE_FINISHED; - /* Fall through */ + fallthrough; case STATE_FINISHED: if (ev.pulse) diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 
f9616158bcf44a27527394d03742c5543560e249..98681ba10428b4c9587c8432f69bb611a497f70f 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -1726,7 +1726,7 @@ static int mceusb_dev_probe(struct usb_interface *intf, goto mem_alloc_fail; ir->pipe_in = pipe; - ir->buf_in = usb_alloc_coherent(dev, maxp, GFP_ATOMIC, &ir->dma_in); + ir->buf_in = usb_alloc_coherent(dev, maxp, GFP_KERNEL, &ir->dma_in); if (!ir->buf_in) goto buf_in_alloc_fail; diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 7b53066d9d07c2278100d8bf7edf6cf881c25f13..dee8a9f3d80ac26c1cd869113ef364ab2cc18fb1 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -1292,6 +1292,10 @@ static ssize_t store_protocols(struct device *device, } mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } old_protocols = *current_protocols; new_protocols = old_protocols; @@ -1430,6 +1434,10 @@ static ssize_t store_filter(struct device *device, return -EINVAL; mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } new_filter = *filter; if (fattr->mask) @@ -1544,6 +1552,10 @@ static ssize_t store_wakeup_protocols(struct device *device, int i; mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } allowed = dev->allowed_wakeup_protocols; @@ -1601,25 +1613,25 @@ static void rc_dev_release(struct device *device) kfree(dev); } -#define ADD_HOTPLUG_VAR(fmt, val...) \ - do { \ - int err = add_uevent_var(env, fmt, val); \ - if (err) \ - return err; \ - } while (0) - static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env) { struct rc_dev *dev = to_rc_dev(device); + int ret = 0; - if (dev->rc_map.name) - ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name); - if (dev->driver_name) - ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name); - if (dev->device_name) - ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name); + mutex_lock(&dev->lock); - return 0; + if (!dev->registered) + ret = -ENODEV; + if (ret == 0 && dev->rc_map.name) + ret = add_uevent_var(env, "NAME=%s", dev->rc_map.name); + if (ret == 0 && dev->driver_name) + ret = add_uevent_var(env, "DRV_NAME=%s", dev->driver_name); + if (ret == 0 && dev->device_name) + ret = add_uevent_var(env, "DEV_NAME=%s", dev->device_name); + + mutex_unlock(&dev->lock); + + return ret; } /* @@ -2011,14 +2023,14 @@ void rc_unregister_device(struct rc_dev *dev) del_timer_sync(&dev->timer_keyup); del_timer_sync(&dev->timer_repeat); - rc_free_rx_device(dev); - mutex_lock(&dev->lock); if (dev->users && dev->close) dev->close(dev); dev->registered = false; mutex_unlock(&dev->lock); + rc_free_rx_device(dev); + /* * lirc device should be freed with dev->registered = false, so * that userspace polling will get notified. 
diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c index 8941d73f6611db4a54cda773e0cadcedf0d5fcbd..71928e30dae809310fb7b8a4569194055258e657 100644 --- a/drivers/media/test-drivers/vicodec/vicodec-core.c +++ b/drivers/media/test-drivers/vicodec/vicodec-core.c @@ -1994,6 +1994,7 @@ static int vicodec_request_validate(struct media_request *req) } ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, vicodec_ctrl_stateless_state.id); + v4l2_ctrl_request_hdl_put(hdl); if (!ctrl) { v4l2_info(&ctx->dev->v4l2_dev, "Missing required codec control\n"); diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c index 734a92caad8d0a2a626f78af4106cdc5c98490d6..7b7d9fe4f945338792dba2d20f944ecbd266cd50 100644 --- a/drivers/media/tuners/xc5000.c +++ b/drivers/media/tuners/xc5000.c @@ -756,7 +756,7 @@ static int xc5000_set_digital_params(struct dvb_frontend *fe) if (!bw) bw = 6000000; /* fall to OFDM handling */ - /* fall through */ + fallthrough; case SYS_DMBTH: case SYS_DVBT: case SYS_DVBT2: diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index 198ddfb8d2b18e1fa12b22afdae24add748032b3..e3234d1690655c3ca744f6803a380e691633c59e 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -525,7 +525,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb) case USB_SPEED_HIGH: info("running at HIGH speed."); break; - case USB_SPEED_UNKNOWN: /* fall through */ + case USB_SPEED_UNKNOWN: default: err("cannot handle USB speed because it is unknown."); return -ENODEV; diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c index 20c50c2d042e6386f9ef2eb3947d73421a4d4af9..e747548ab28695fd2d965a1336edc3dc225b6ec5 100644 --- a/drivers/media/usb/cpia2/cpia2_core.c +++ b/drivers/media/usb/cpia2/cpia2_core.c @@ -165,7 +165,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VP_BRIGHTNESS: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VP_BRIGHTNESS: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -176,7 +176,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_CONTRAST: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_CONTRAST: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -184,7 +184,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VP_SATURATION: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VP_SATURATION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -195,7 +195,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VP_GPIO_DATA: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VP_GPIO_DATA: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -203,7 +203,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VP_GPIO_DIRECTION: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VP_GPIO_DIRECTION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -211,7 +211,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VC_MP_GPIO_DATA: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VC_MP_GPIO_DATA: cmd.req_mode 
= CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; @@ -219,7 +219,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION: cmd.buffer.block_data[0] = param; - /*fall through */ + fallthrough; case CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; @@ -234,7 +234,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_FLICKER_MODES: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_FLICKER_MODES: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -281,7 +281,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_USER_MODE: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_USER_MODE: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -301,7 +301,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_WAKEUP: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_WAKEUP: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; @@ -309,7 +309,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_PW_CONTROL: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_PW_CONTROL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; @@ -322,7 +322,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_SYSTEM_CTRL: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_SYSTEM_CTRL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM; @@ -331,7 +331,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VP_SYSTEM_CTRL: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VP_SYSTEM_CTRL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -339,7 +339,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VP_EXP_MODES: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VP_EXP_MODES: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -347,7 +347,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_DEVICE_CONFIG: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_DEVICE_CONFIG: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -368,7 +368,7 @@ int cpia2_do_command(struct camera_data *cam, break; case CPIA2_CMD_SET_VC_CONTROL: cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_VC_CONTROL: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC; cmd.reg_count = 1; @@ -403,7 +403,7 @@ int cpia2_do_command(struct camera_data *cam, this register can also affect flicker modes */ cmd.buffer.block_data[0] = param; - /* fall through */ + fallthrough; case CPIA2_CMD_GET_USER_EFFECTS: cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP; cmd.reg_count = 1; @@ -1751,7 +1751,7 @@ int cpia2_set_fps(struct camera_data *cam, int framerate) CPIA2_VP_SENSOR_FLAGS_500) { return -EINVAL; } - /* Fall through */ + fallthrough; case CPIA2_VP_FRAMERATE_15: case CPIA2_VP_FRAMERATE_12_5: case CPIA2_VP_FRAMERATE_7_5: diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c index 
d9f953f2d0882923bfcba1246c532808ba93bc66..425e470b0fd354aa7ea3888557f342b7df0639b5 100644 --- a/drivers/media/usb/cx231xx/cx231xx-video.c +++ b/drivers/media/usb/cx231xx/cx231xx-video.c @@ -996,7 +996,7 @@ void cx231xx_v4l2_create_entities(struct cx231xx *dev) /* The DVB core will handle it */ if (dev->tuner_type == TUNER_ABSENT) continue; - /* fall through */ + fallthrough; default: /* just to shut up a gcc warning */ ent->function = MEDIA_ENT_F_CONN_RF; break; diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 4ef3fa98d20f4d985e8fd1dd4857640508832aa3..52e648e2713aec5d507cb00c6afa7282fca38b0f 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -1659,14 +1659,14 @@ static int dib8096_set_param_override(struct dvb_frontend *fe) switch (band) { default: - deb_info("Warning : Rf frequency (%iHz) is not in the supported range, using VHF switch ", fe->dtv_property_cache.frequency); - /* fall through */ + deb_info("Warning : Rf frequency (%iHz) is not in the supported range, using VHF switch ", fe->dtv_property_cache.frequency); + fallthrough; case BAND_VHF: - state->dib8000_ops.set_gpio(fe, 3, 0, 1); - break; + state->dib8000_ops.set_gpio(fe, 3, 0, 1); + break; case BAND_UHF: - state->dib8000_ops.set_gpio(fe, 3, 0, 0); - break; + state->dib8000_ops.set_gpio(fe, 3, 0, 0); + break; } ret = state->set_param_save(fe); diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index f96626fe2c0b7088f411f166cf8b7a7d3304f806..a27a6844032528437561278a5816d0fe6513a4e7 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -1886,12 +1886,12 @@ static int dw2102_load_firmware(struct usb_device *dev, switch (le16_to_cpu(dev->descriptor.idProduct)) { case USB_PID_TEVII_S650: dw2104_properties.rc.core.rc_codes = RC_MAP_TEVII_NEC; - /* fall through */ + fallthrough; case USB_PID_DW2104: reset = 1; dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1, DW210X_WRITE_MSG); - /* fall through */ + fallthrough; case USB_PID_DW3101: reset = 0; dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, @@ -1924,7 +1924,7 @@ static int dw2102_load_firmware(struct usb_device *dev, break; } } - /* fall through */ + fallthrough; case 0x2101: dw210x_op_rw(dev, 0xbc, 0x0030, 0, &reset16[0], 2, DW210X_READ_MSG); diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 3f3fbcd60cc6211acf0062de7f5072ba1d81f255..45a2403aa039bc05e697602f11f38517b10fcf2b 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -2200,7 +2200,7 @@ static int check_range(enum v4l2_ctrl_type type, case V4L2_CTRL_TYPE_BOOLEAN: if (step != 1 || max > 1 || min < 0) return -ERANGE; - /* fall through */ + fallthrough; case V4L2_CTRL_TYPE_U8: case V4L2_CTRL_TYPE_U16: case V4L2_CTRL_TYPE_U32: diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index a556880f225a5fb85c0a709d7c7d53222453b988..f74b422808923933da81fd7cf3e87dc5ef080c78 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -782,7 +782,6 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only) p->stepwise.step_height); break; case V4L2_FRMSIZE_TYPE_CONTINUOUS: - /* fall through */ default: pr_cont("\n"); break; @@ -816,7 +815,6 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only) p->stepwise.step.denominator); break; case V4L2_FRMIVAL_TYPE_CONTINUOUS: - /* 
fall through */ default: pr_cont("\n"); break; @@ -3189,14 +3187,16 @@ static int video_put_user(void __user *arg, void *parg, unsigned int cmd) #ifdef CONFIG_COMPAT_32BIT_TIME case VIDIOC_DQEVENT_TIME32: { struct v4l2_event *ev = parg; - struct v4l2_event_time32 ev32 = { - .type = ev->type, - .pending = ev->pending, - .sequence = ev->sequence, - .timestamp.tv_sec = ev->timestamp.tv_sec, - .timestamp.tv_nsec = ev->timestamp.tv_nsec, - .id = ev->id, - }; + struct v4l2_event_time32 ev32; + + memset(&ev32, 0, sizeof(ev32)); + + ev32.type = ev->type; + ev32.pending = ev->pending; + ev32.sequence = ev->sequence; + ev32.timestamp.tv_sec = ev->timestamp.tv_sec; + ev32.timestamp.tv_nsec = ev->timestamp.tv_nsec; + ev32.id = ev->id; memcpy(&ev32.u, &ev->u, sizeof(ev->u)); memcpy(&ev32.reserved, &ev->reserved, sizeof(ev->reserved)); @@ -3210,21 +3210,23 @@ static int video_put_user(void __user *arg, void *parg, unsigned int cmd) case VIDIOC_DQBUF_TIME32: case VIDIOC_PREPARE_BUF_TIME32: { struct v4l2_buffer *vb = parg; - struct v4l2_buffer_time32 vb32 = { - .index = vb->index, - .type = vb->type, - .bytesused = vb->bytesused, - .flags = vb->flags, - .field = vb->field, - .timestamp.tv_sec = vb->timestamp.tv_sec, - .timestamp.tv_usec = vb->timestamp.tv_usec, - .timecode = vb->timecode, - .sequence = vb->sequence, - .memory = vb->memory, - .m.userptr = vb->m.userptr, - .length = vb->length, - .request_fd = vb->request_fd, - }; + struct v4l2_buffer_time32 vb32; + + memset(&vb32, 0, sizeof(vb32)); + + vb32.index = vb->index; + vb32.type = vb->type; + vb32.bytesused = vb->bytesused; + vb32.flags = vb->flags; + vb32.field = vb->field; + vb32.timestamp.tv_sec = vb->timestamp.tv_sec; + vb32.timestamp.tv_usec = vb->timestamp.tv_usec; + vb32.timecode = vb->timecode; + vb32.sequence = vb->sequence; + vb32.memory = vb->memory; + vb32.m.userptr = vb->m.userptr; + vb32.length = vb->length; + vb32.request_fd = vb->request_fd; if (copy_to_user(arg, &vb32, sizeof(vb32))) return -EFAULT; diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c index 5c91fc3e65b50453a7f1929342502d9a7c3b18ca..606a271bdd2dddfd7b825c018a3da3068bd75b1a 100644 --- a/drivers/media/v4l2-core/videobuf-core.c +++ b/drivers/media/v4l2-core/videobuf-core.c @@ -354,7 +354,7 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b, break; case VIDEOBUF_ERROR: b->flags |= V4L2_BUF_FLAG_ERROR; - /* fall through */ + fallthrough; case VIDEOBUF_DONE: b->flags |= V4L2_BUF_FLAG_DONE; break; diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index f512cbc7a36c5e8047ba3041f99c3ffe02a53ff5..ca0097664b12506d8a4a59b1d7396731dd2b823e 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -313,7 +313,6 @@ static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd) tick_ps *= div; break; case GPMC_CD_FCLK: - /* FALL-THROUGH */ default: break; } diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index d9ee8e3dc72daa80d517dd022a9cd098abf5aa68..178954228631d8e4b02cbb8409d0c9d70eb6d3c0 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c @@ -371,7 +371,7 @@ static int h_msb_read_page(struct memstick_dev *card, serial mode), then just fall through */ if (msb_read_int_reg(msb, -1)) return 0; - /* fallthrough */ + fallthrough; case MSB_RP_RECEIVE_INT_REQ_RESULT: intreg = mrq->data[0]; @@ -403,7 +403,7 @@ static int h_msb_read_page(struct memstick_dev *card, case MSB_RP_RECEIVE_STATUS_REG: 
msb->regs.status = *(struct ms_status_register *)mrq->data; msb->state = MSB_RP_SEND_OOB_READ; - /* fallthrough */ + fallthrough; case MSB_RP_SEND_OOB_READ: if (!msb_read_regs(msb, @@ -418,7 +418,7 @@ static int h_msb_read_page(struct memstick_dev *card, msb->regs.extra_data = *(struct ms_extra_data_register *) mrq->data; msb->state = MSB_RP_SEND_READ_DATA; - /* fallthrough */ + fallthrough; case MSB_RP_SEND_READ_DATA: /* Skip that state if we only read the oob */ @@ -518,7 +518,7 @@ static int h_msb_write_block(struct memstick_dev *card, msb->state = MSB_WB_RECEIVE_INT_REQ; if (msb_read_int_reg(msb, -1)) return 0; - /* fallthrough */ + fallthrough; case MSB_WB_RECEIVE_INT_REQ: intreg = mrq->data[0]; @@ -549,7 +549,7 @@ static int h_msb_write_block(struct memstick_dev *card, msb->int_polling = false; msb->state = MSB_WB_SEND_WRITE_DATA; - /* fallthrough */ + fallthrough; case MSB_WB_SEND_WRITE_DATA: sg_init_table(sg, ARRAY_SIZE(sg)); @@ -628,7 +628,7 @@ static int h_msb_send_command(struct memstick_dev *card, msb->state = MSB_SC_RECEIVE_INT_REQ; if (msb_read_int_reg(msb, -1)) return 0; - /* fallthrough */ + fallthrough; case MSB_SC_RECEIVE_INT_REQ: intreg = mrq->data[0]; diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index 4a6b866b0291cf91fa86f7f300b2d14633a7e655..e83c3ada9389ea460ac1fe339e6e9a9f28f89e0b 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c @@ -255,11 +255,11 @@ static unsigned int jmb38x_ms_write_data(struct jmb38x_ms_host *host, case 3: host->io_word[0] |= buf[off + 2] << 16; host->io_pos++; - /* fall through */ + fallthrough; case 2: host->io_word[0] |= buf[off + 1] << 8; host->io_pos++; - /* fall through */ + fallthrough; case 1: host->io_word[0] |= buf[off]; host->io_pos++; diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c index fc35c74044298a216ad3e56d960a0710b0d89f1e..786e46798da24e9ff0b75ba090eabaffaccad266 100644 --- a/drivers/memstick/host/tifm_ms.c +++ b/drivers/memstick/host/tifm_ms.c @@ -162,11 +162,11 @@ static unsigned int tifm_ms_write_data(struct tifm_ms *host, case 3: host->io_word |= buf[off + 2] << 16; host->io_pos++; - /* fall through */ + fallthrough; case 2: host->io_word |= buf[off + 1] << 8; host->io_pos++; - /* fall through */ + fallthrough; case 1: host->io_word |= buf[off]; host->io_pos++; diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 5216487db4fbeabcb32899b4104ec33dc01dd360..9903e9660a38dc3dc1bba417b1c1a4970a2ae7da 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -642,7 +642,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) freereq = 0; if (event != MPI_EVENT_EVENT_CHANGE) break; - /* fall through */ + fallthrough; case MPI_FUNCTION_CONFIG: case MPI_FUNCTION_SAS_IO_UNIT_CONTROL: ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; @@ -1887,7 +1887,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) case MPI_MANUFACTPAGE_DEVICEID_FC939X: case MPI_MANUFACTPAGE_DEVICEID_FC949X: ioc->errata_flag_1064 = 1; - /* fall through */ + fallthrough; case MPI_MANUFACTPAGE_DEVICEID_FC909: case MPI_MANUFACTPAGE_DEVICEID_FC929: case MPI_MANUFACTPAGE_DEVICEID_FC919: @@ -1932,7 +1932,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) pcixcmd &= 0x8F; pci_write_config_byte(pdev, 0x6a, pcixcmd); } - /* fall through */ + fallthrough; case MPI_MANUFACTPAGE_DEVID_1030_53C1035: ioc->bus_type = SPI; diff --git 
a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 6a79cd0ebe2b08bf6df0bc0680221c16c82c4f3d..18b91ea1a353f21c7ec85936cfc6faf51267af4e 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -4326,7 +4326,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event, } } mpt_findImVolumes(ioc); - /* fall through */ + fallthrough; case MPTSAS_ADD_DEVICE: memset(&sas_device, 0, sizeof(struct mptsas_devinfo)); diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 1491561d2e5c9af2c30a41d7005bd2a25ec90bad..8543f0324d5a820a28991929e7c93bdcf14ba53a 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -784,7 +784,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) /* * Allow non-SAS & non-NEXUS_LOSS to drop into below code */ - /* Fall through */ + fallthrough; case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ /* Linux handles an unsolicited DID_RESET better @@ -881,7 +881,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */ scsi_set_resid(sc, 0); - /* Fall through */ + fallthrough; case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */ sc->result = (DID_OK << 16) | scsi_status; diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index a9d9c1cdf546b65c551fd827697945dcb41b1006..a5983d515db030822401a4e9b4d79fc6c7086659 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -1515,10 +1515,10 @@ static unsigned long dsiclk_rate(u8 n) switch (divsel) { case PRCM_DSI_PLLOUT_SEL_PHI_4: div *= 2; - /* Fall through */ + fallthrough; case PRCM_DSI_PLLOUT_SEL_PHI_2: div *= 2; - /* Fall through */ + fallthrough; case PRCM_DSI_PLLOUT_SEL_PHI: return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK), PLL_RAW) / div; diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c index af764bc87d7ceba5f7c8869ecc3d44a9591b2fa7..761b4ef3a3810db8ea21a00f8a7a333da1145400 100644 --- a/drivers/mfd/iqs62x.c +++ b/drivers/mfd/iqs62x.c @@ -136,7 +136,7 @@ static int iqs62x_dev_init(struct iqs62x_core *iqs62x) if (val & IQS620_PROX_SETTINGS_4_SAR_EN) iqs62x->ui_sel = IQS62X_UI_SAR1; - /* fall through */ + fallthrough; case IQS621_PROD_NUM: ret = regmap_write(iqs62x->regmap, IQS620_GLBL_EVENT_MASK, @@ -470,7 +470,7 @@ static irqreturn_t iqs62x_irq(int irq, void *context) case IQS62X_EVENT_UI_LO: event_data.ui_data = get_unaligned_le16(&event_map[i]); - /* fall through */ + fallthrough; case IQS62X_EVENT_UI_HI: case IQS62X_EVENT_NONE: @@ -491,7 +491,7 @@ static irqreturn_t iqs62x_irq(int irq, void *context) case IQS62X_EVENT_HYST: event_map[i] <<= iqs62x->dev_desc->hyst_shift; - /* fall through */ + fallthrough; case IQS62X_EVENT_WHEEL: case IQS62X_EVENT_HALL: diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index c3651f06684fa81b2912b7924eaa02b0f35fa824..fc00aaccb5f72979868991c6383ea4b78fa4ed47 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -126,10 +126,6 @@ static int mfd_match_of_node_to_dev(struct platform_device *pdev, const __be32 *reg; u64 of_node_addr; - /* Skip devices 'disabled' by Device Tree */ - if (!of_device_is_available(np)) - return -ENODEV; - /* Skip if OF node has previously been allocated to a device */ list_for_each_entry(of_entry, &mfd_of_node_list, list) if (of_entry->np == np) @@ -212,6 +208,12 @@ static int mfd_add_device(struct device *parent, int id, if 
(IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) { for_each_child_of_node(parent->of_node, np) { if (of_device_is_compatible(np, cell->of_compatible)) { + /* Ignore 'disabled' devices error free */ + if (!of_device_is_available(np)) { + ret = 0; + goto fail_alias; + } + ret = mfd_match_of_node_to_dev(pdev, np, cell); if (ret == -EAGAIN) continue; @@ -370,8 +372,6 @@ static int mfd_remove_devices_fn(struct device *dev, void *data) regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies, cell->num_parent_supplies); - kfree(cell); - platform_device_unregister(pdev); return 0; } diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c index 5bef142c483565cdb35da30f9ce388c20f2887cf..111d11fd25aad1f0b6178d9c932e4c550c993d35 100644 --- a/drivers/mfd/mxs-lradc.c +++ b/drivers/mfd/mxs-lradc.c @@ -172,7 +172,7 @@ static int mxs_lradc_probe(struct platform_device *pdev) MXS_LRADC_TOUCHSCREEN_5WIRE; break; } - /* fall through - to an error message for i.MX23 */ + fallthrough; /* to an error message for i.MX23 */ default: dev_err(&pdev->dev, "Unsupported number of touchscreen wires (%d)\n" diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 1e6431cb853602fc40dbcee8d589c5eca62c9010..2a3a240b4619a1e5abb628d126b361941ebf20f8 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -308,7 +308,7 @@ static int usbhs_runtime_resume(struct device *dev) i, r); } } - /* Fall through - as HSIC mode needs utmi_clk */ + fallthrough; /* as HSIC mode needs utmi_clk */ case OMAP_EHCI_PORT_MODE_TLL: if (!IS_ERR(omap->utmi_clk[i])) { @@ -344,7 +344,7 @@ static int usbhs_runtime_suspend(struct device *dev) if (!IS_ERR(omap->hsic480m_clk[i])) clk_disable_unprepare(omap->hsic480m_clk[i]); - /* Fall through - as utmi_clks were used in HSIC mode */ + fallthrough; /* as utmi_clks were used in HSIC mode */ case OMAP_EHCI_PORT_MODE_TLL: if (!IS_ERR(omap->utmi_clk[i])) diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c index abaab541df19dfaf8a17b9c1822075775c809e55..545196c85b5ca9d9f40743ae54737f6b21cd2be0 100644 --- a/drivers/mfd/rave-sp.c +++ b/drivers/mfd/rave-sp.c @@ -270,7 +270,7 @@ static void *stuff(unsigned char *dest, const unsigned char *src, size_t n) case RAVE_SP_ETX: case RAVE_SP_DLE: *dest++ = RAVE_SP_DLE; - /* FALLTHROUGH */ + fallthrough; default: *dest++ = byte; } @@ -541,7 +541,7 @@ static int rave_sp_receive_buf(struct serdev_device *serdev, * deframer buffer */ - /* FALLTHROUGH */ + fallthrough; case RAVE_SP_EXPECT_ESCAPED_DATA: if (deframer->length == sizeof(deframer->data)) { diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index 75859e492984115fdfab54a9cc5e2024b530e583..df5cebb372a592fce06bd03412375452cecbd658 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -95,7 +95,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk) break; default: pr_err("Failed to retrieve valid hwlock: %d\n", ret); - /* fall-through */ + fallthrough; case -EPROBE_DEFER: goto err_regmap; } diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 2591c21b2b5d84667a8e8a6a03bee4c109721ace..26a23abc053d2cc6ec761b6c37f74a4999ff9cdf 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -692,10 +692,6 @@ static int at24_probe(struct i2c_client *client) nvmem_config.word_size = 1; nvmem_config.size = byte_len; - at24->nvmem = devm_nvmem_register(dev, &nvmem_config); - if (IS_ERR(at24->nvmem)) - return PTR_ERR(at24->nvmem); - i2c_set_clientdata(client, at24); 
err = regulator_enable(at24->vcc_reg); @@ -708,6 +704,13 @@ static int at24_probe(struct i2c_client *client) pm_runtime_set_active(dev); pm_runtime_enable(dev); + at24->nvmem = devm_nvmem_register(dev, &nvmem_config); + if (IS_ERR(at24->nvmem)) { + pm_runtime_disable(dev); + regulator_disable(at24->vcc_reg); + return PTR_ERR(at24->nvmem); + } + /* * Perform a one-byte test read to verify that the * chip is functional. diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index cde9a2fc1325065b054bf89363f7b568e15145c9..ed8d38b0992511283b764d507e3afb4ef4cf3712 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -90,10 +90,10 @@ static int at25_ee_read(void *priv, unsigned int offset, switch (at25->addrlen) { default: /* case 3 */ *cp++ = offset >> 16; - /* fall through */ + fallthrough; case 2: *cp++ = offset >> 8; - /* fall through */ + fallthrough; case 1: case 0: /* can't happen: for better codegen */ *cp++ = offset >> 0; @@ -178,10 +178,10 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) switch (at25->addrlen) { default: /* case 3 */ *cp++ = offset >> 16; - /* fall through */ + fallthrough; case 2: *cp++ = offset >> 8; - /* fall through */ + fallthrough; case 1: case 0: /* can't happen: for better codegen */ *cp++ = offset >> 0; @@ -278,7 +278,7 @@ static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip) switch (val) { case 9: chip->flags |= EE_INSTR_BIT3_IS_ADDR; - /* fall through */ + fallthrough; case 8: chip->flags |= EE_ADDR1; break; diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c index 7c38c4f7f9c02b4501787f1a92b054d469a8a527..a8004911c9771ac522290d3523ed6eab195aa2ac 100644 --- a/drivers/misc/habanalabs/common/command_buffer.c +++ b/drivers/misc/habanalabs/common/command_buffer.c @@ -10,6 +10,7 @@ #include #include +#include <linux/uaccess.h> #include static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) @@ -300,7 +301,7 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) struct hl_device *hdev = hpriv->hdev; struct hl_cb *cb; phys_addr_t address; - u32 handle; + u32 handle, user_cb_size; int rc; handle = vma->vm_pgoff; @@ -314,7 +315,8 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) } /* Validation check */ - if ((vma->vm_end - vma->vm_start) != ALIGN(cb->size, PAGE_SIZE)) { + user_cb_size = vma->vm_end - vma->vm_start; + if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) { dev_err(hdev->dev, "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n", vma->vm_end - vma->vm_start, cb->size); @@ -322,6 +324,16 @@ goto put_cb; } + if (!access_ok((void __user *) (uintptr_t) vma->vm_start, + user_cb_size)) { + dev_err(hdev->dev, + "user pointer is invalid - 0x%lx\n", + vma->vm_start); + + rc = -EINVAL; + goto put_cb; + } + spin_lock(&cb->lock); if (cb->mmap) { diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index b9840e368eb576389cfb2747616cbce1a738ae19..2e3fcbc794db4af2b7ec3245f4ab76f0f6b6f54d 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -808,6 +808,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, /* currently it is guaranteed to have only one chunk */ chunk = &cs_chunk_array[0]; + + if (chunk->queue_index >= hdev->asic_prop.max_queues) { + dev_err(hdev->dev, 
"Queue index %d is invalid\n", + chunk->queue_index); + rc = -EINVAL; + goto free_cs_chunk_array; + } + q_idx = chunk->queue_index; hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx]; q_type = hw_queue_prop->type; diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index c50c6fc9e905bc38e77ff22da26c6085aaa24957..aa77771635d33fca449c13e8bc7cde97315a3130 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -19,7 +19,7 @@ static struct dentry *hl_debug_root; static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, - u8 i2c_reg, u32 *val) + u8 i2c_reg, long *val) { struct armcp_packet pkt; int rc; @@ -36,7 +36,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, pkt.i2c_reg = i2c_reg; rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, (long *) val); + 0, val); if (rc) dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc); @@ -827,7 +827,7 @@ static ssize_t hl_i2c_data_read(struct file *f, char __user *buf, struct hl_dbg_device_entry *entry = file_inode(f)->i_private; struct hl_device *hdev = entry->hdev; char tmp_buf[32]; - u32 val; + long val; ssize_t rc; if (*ppos) @@ -842,7 +842,7 @@ static ssize_t hl_i2c_data_read(struct file *f, char __user *buf, return rc; } - sprintf(tmp_buf, "0x%02x\n", val); + sprintf(tmp_buf, "0x%02lx\n", val); rc = simple_read_from_buffer(buf, count, ppos, tmp_buf, strlen(tmp_buf)); @@ -982,7 +982,7 @@ static ssize_t hl_clk_gate_read(struct file *f, char __user *buf, return 0; sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask); - rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, + rc = simple_read_from_buffer(buf, count, ppos, tmp_buf, strlen(tmp_buf) + 1); return rc; diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index be16b75bdfdb5bbd0ebb54b550a2a4fcfe36e76e..24b01cce0a3846511f1af5406443296559d8f3f1 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -288,7 +288,7 @@ static int device_early_init(struct hl_device *hdev) for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) { snprintf(workq_name, 32, "hl-free-jobs-%u", i); hdev->cq_wq[i] = create_singlethread_workqueue(workq_name); - if (hdev->cq_wq == NULL) { + if (hdev->cq_wq[i] == NULL) { dev_err(hdev->dev, "Failed to allocate CQ workqueue\n"); rc = -ENOMEM; goto free_cq_wq; @@ -1069,7 +1069,7 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset, goto out_err; } - hl_set_max_power(hdev, hdev->max_power); + hl_set_max_power(hdev); } else { rc = hdev->asic_funcs->soft_reset_late_init(hdev); if (rc) { @@ -1318,6 +1318,11 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) goto out_disabled; } + /* Need to call this again because the max power might change, + * depending on card type for certain ASICs + */ + hl_set_max_power(hdev); + /* * hl_hwmon_init() must be called after device_late_init(), because only * there we get the information from the device about which diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index f70302cdab1bde7fa39eba3ad6643e9b8dcc6783..f52bc690dfc5c68601950f878dee136d12ff006b 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -13,6 +13,7 @@ #include #include +#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */ /** * 
hl_fw_load_fw_to_device() - Load F/W code to device's memory. * @@ -48,6 +49,14 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size); + if (fw_size > FW_FILE_MAX_SIZE) { + dev_err(hdev->dev, + "FW file size %zu exceeds maximum of %u bytes\n", + fw_size, FW_FILE_MAX_SIZE); + rc = -EINVAL; + goto out; + } + fw_data = (const u64 *) fw->data; memcpy_toio(dst, fw_data, fw_size); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 018d9d67e8e6ac2167107c1e2126411ea3caefad..edbd627b29d25749e6024223afbb1874c6ee69f6 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1462,6 +1462,8 @@ struct hl_device_idle_busy_ts { * details. * @in_reset: is device in reset flow. * @curr_pll_profile: current PLL profile. + * @card_type: Various ASICs have several card types. This indicates the card + * type of the current device. * @cs_active_cnt: number of active command submissions on this device (active * means already in H/W queues) * @major: habanalabs kernel driver major. @@ -1566,6 +1568,7 @@ struct hl_device { u64 clock_gating_mask; atomic_t in_reset; enum hl_pll_frequency curr_pll_profile; + enum armcp_card_types card_type; int cs_active_cnt; u32 major; u32 high_pll; @@ -1651,7 +1654,7 @@ struct hl_ioctl_desc { * * Return: true if the area is inside the valid range, false otherwise. */ -static inline bool hl_mem_area_inside_range(u64 address, u32 size, +static inline bool hl_mem_area_inside_range(u64 address, u64 size, u64 range_start_address, u64 range_end_address) { u64 end_address = address + size; @@ -1858,7 +1861,7 @@ int hl_get_pwm_info(struct hl_device *hdev, void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long value); u64 hl_get_max_power(struct hl_device *hdev); -void hl_set_max_power(struct hl_device *hdev, u64 value); +void hl_set_max_power(struct hl_device *hdev); int hl_set_voltage(struct hl_device *hdev, int sensor_index, u32 attr, long value); int hl_set_current(struct hl_device *hdev, diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index dce9273e557a07aca8a852dff72487c4fa75369d..5ff4688683fd3714e73de42ace61ae7e17f648bf 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -66,6 +66,11 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift; total_size = num_pgs << page_shift; + if (!total_size) { + dev_err(hdev->dev, "Cannot allocate 0 bytes\n"); + return -EINVAL; + } + contiguous = args->flags & HL_MEM_CONTIGUOUS; if (contiguous) { @@ -93,7 +98,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, phys_pg_pack->contiguous = contiguous; phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL); - if (!phys_pg_pack->pages) { + if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) { rc = -ENOMEM; goto pages_arr_err; } @@ -683,7 +688,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64), GFP_KERNEL); - if (!phys_pg_pack->pages) { + if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) { rc = -ENOMEM; goto page_pack_arr_mem_err; } diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c index 
edcc11d5eaf15a7989501378fa16e771d7d46322..3fc0f497fab349ca6a90256bcc2fcb7dd292b9a0 100644 --- a/drivers/misc/habanalabs/common/mmu.c +++ b/drivers/misc/habanalabs/common/mmu.c @@ -450,7 +450,7 @@ int hl_mmu_init(struct hl_device *hdev) hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid, prop->mmu_hop_table_size, GFP_KERNEL | __GFP_ZERO); - if (!hdev->mmu_shadow_hop0) { + if (ZERO_OR_NULL_PTR(hdev->mmu_shadow_hop0)) { rc = -ENOMEM; goto err_pool_add; } diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c index 7bd3737571f38acabc08581b0246e016e818d970..2770f03b6cbb203b9be410765f70b20e2dd7523a 100644 --- a/drivers/misc/habanalabs/common/pci.c +++ b/drivers/misc/habanalabs/common/pci.c @@ -227,7 +227,7 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region, } /* Point to the specified address */ - rc = hl_pci_iatu_write(hdev, offset + 0x14, + rc |= hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(pci_region->addr)); rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(pci_region->addr)); @@ -369,15 +369,17 @@ int hl_pci_init(struct hl_device *hdev) rc = hdev->asic_funcs->init_iatu(hdev); if (rc) { dev_err(hdev->dev, "Failed to initialize iATU\n"); - goto disable_device; + goto unmap_pci_bars; } rc = hl_pci_set_dma_mask(hdev); if (rc) - goto disable_device; + goto unmap_pci_bars; return 0; +unmap_pci_bars: + hl_pci_bars_unmap(hdev); disable_device: pci_clear_master(pdev); pci_disable_device(pdev); diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c index b3cb0ac4721c5bb3d74b7d24d9371e97de8df03c..5ae484cc84cd4fa1087d349ffdd99af00c2cd1f0 100644 --- a/drivers/misc/habanalabs/common/sysfs.c +++ b/drivers/misc/habanalabs/common/sysfs.c @@ -81,7 +81,7 @@ u64 hl_get_max_power(struct hl_device *hdev) return result; } -void hl_set_max_power(struct hl_device *hdev, u64 value) +void hl_set_max_power(struct hl_device *hdev) { struct armcp_packet pkt; int rc; @@ -90,7 +90,7 @@ void hl_set_max_power(struct hl_device *hdev, u64 value) pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET << ARMCP_PKT_CTL_OPCODE_SHIFT); - pkt.value = cpu_to_le64(value); + pkt.value = cpu_to_le64(hdev->max_power); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL); @@ -316,7 +316,7 @@ static ssize_t max_power_store(struct device *dev, } hdev->max_power = value; - hl_set_max_power(hdev, value); + hl_set_max_power(hdev); out: return count; @@ -422,6 +422,7 @@ int hl_sysfs_init(struct hl_device *hdev) hdev->pm_mng_profile = PM_AUTO; else hdev->pm_mng_profile = PM_MANUAL; + hdev->max_power = hdev->asic_prop.max_power_default; hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group); diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 00a0a7238d81496f2a886eb77c1e02af9187f24e..4009b7df4cafec33ae1dc8bdb7090e18def1224a 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -154,6 +154,29 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = { [PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe) }; +static inline bool validate_packet_id(enum packet_id id) +{ + switch (id) { + case PACKET_WREG_32: + case PACKET_WREG_BULK: + case PACKET_MSG_LONG: + case PACKET_MSG_SHORT: + case PACKET_CP_DMA: + case PACKET_REPEAT: + case PACKET_MSG_PROT: + case PACKET_FENCE: + case PACKET_LIN_DMA: + case PACKET_NOP: + case PACKET_STOP: + case PACKET_ARB_POINT: + case PACKET_WAIT: + case PACKET_LOAD_AND_EXE: + return true; + 
default: + return false; + } +} + static const char * const gaudi_tpc_interrupts_cause[GAUDI_NUM_OF_TPC_INTR_CAUSE] = { "tpc_address_exceed_slm", @@ -433,7 +456,7 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) prop->num_of_events = GAUDI_EVENT_SIZE; prop->tpc_enabled_mask = TPC_ENABLED_MASK; - prop->max_power_default = MAX_POWER_DEFAULT; + prop->max_power_default = MAX_POWER_DEFAULT_PCI; prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT; prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE; @@ -2485,6 +2508,7 @@ static void gaudi_set_clock_gating(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; u32 qman_offset; + bool enable; int i; /* In case we are during debug session, don't enable the clock gate @@ -2494,46 +2518,43 @@ static void gaudi_set_clock_gating(struct hl_device *hdev) return; for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) { - if (!(hdev->clock_gating_mask & - (BIT_ULL(gaudi_dma_assignment[i])))) - continue; + enable = !!(hdev->clock_gating_mask & + (BIT_ULL(gaudi_dma_assignment[i]))); qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET; - WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); + WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, + enable ? QMAN_CGM1_PWR_GATE_EN : 0); WREG32(mmDMA0_QM_CGM_CFG + qman_offset, - QMAN_UPPER_CP_CGM_PWR_GATE_EN); + enable ? QMAN_UPPER_CP_CGM_PWR_GATE_EN : 0); } for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) { - if (!(hdev->clock_gating_mask & - (BIT_ULL(gaudi_dma_assignment[i])))) - continue; + enable = !!(hdev->clock_gating_mask & + (BIT_ULL(gaudi_dma_assignment[i]))); qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET; - WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); + WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, + enable ? QMAN_CGM1_PWR_GATE_EN : 0); WREG32(mmDMA0_QM_CGM_CFG + qman_offset, - QMAN_COMMON_CP_CGM_PWR_GATE_EN); + enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0); } - if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) { - WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); - WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN); - } + enable = !!(hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))); + WREG32(mmMME0_QM_CGM_CFG1, enable ? QMAN_CGM1_PWR_GATE_EN : 0); + WREG32(mmMME0_QM_CGM_CFG, enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0); - if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) { - WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); - WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN); - } + enable = !!(hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))); + WREG32(mmMME2_QM_CGM_CFG1, enable ? QMAN_CGM1_PWR_GATE_EN : 0); + WREG32(mmMME2_QM_CGM_CFG, enable ? QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0); for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) { - if (!(hdev->clock_gating_mask & - (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i)))) - continue; + enable = !!(hdev->clock_gating_mask & + (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i))); WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset, - QMAN_CGM1_PWR_GATE_EN); + enable ? QMAN_CGM1_PWR_GATE_EN : 0); WREG32(mmTPC0_QM_CGM_CFG + qman_offset, - QMAN_COMMON_CP_CGM_PWR_GATE_EN); + enable ? 
QMAN_COMMON_CP_CGM_PWR_GATE_EN : 0); qman_offset += TPC_QMAN_OFFSET; } @@ -3772,6 +3793,12 @@ static int gaudi_validate_cb(struct hl_device *hdev, PACKET_HEADER_PACKET_ID_MASK) >> PACKET_HEADER_PACKET_ID_SHIFT); + if (!validate_packet_id(pkt_id)) { + dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); + rc = -EINVAL; + break; + } + pkt_size = gaudi_packet_sizes[pkt_id]; cb_parsed_length += pkt_size; if (cb_parsed_length > parser->user_cb_size) { @@ -3995,6 +4022,12 @@ static int gaudi_patch_cb(struct hl_device *hdev, PACKET_HEADER_PACKET_ID_MASK) >> PACKET_HEADER_PACKET_ID_SHIFT); + if (!validate_packet_id(pkt_id)) { + dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); + rc = -EINVAL; + break; + } + pkt_size = gaudi_packet_sizes[pkt_id]; cb_parsed_length += pkt_size; if (cb_parsed_length > parser->user_cb_size) { @@ -5215,7 +5248,7 @@ static int gaudi_extract_ecc_info(struct hl_device *hdev, *memory_wrapper_idx = 0xFF; /* Iterate through memory wrappers, a single bit must be set */ - for (i = 0 ; i > num_mem_regs ; i++) { + for (i = 0 ; i < num_mem_regs ; i++) { err_addr += i * 4; err_word = RREG32(err_addr); if (err_word) { @@ -6022,6 +6055,15 @@ static int gaudi_armcp_info_get(struct hl_device *hdev) strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME, CARD_NAME_MAX_LEN); + hdev->card_type = le32_to_cpu(hdev->asic_prop.armcp_info.card_type); + + if (hdev->card_type == armcp_card_type_pci) + prop->max_power_default = MAX_POWER_DEFAULT_PCI; + else if (hdev->card_type == armcp_card_type_pmc) + prop->max_power_default = MAX_POWER_DEFAULT_PMC; + + hdev->max_power = prop->max_power_default; + return 0; } diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index 5dc99f6f02963495252676d51c55d0af90f7cc43..82137c3f3e2e6415343dc2aa8ccc7f17d7e404df 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -41,7 +41,8 @@ #define GAUDI_MAX_CLK_FREQ 2200000000ull /* 2200 MHz */ -#define MAX_POWER_DEFAULT 200000 /* 200W */ +#define MAX_POWER_DEFAULT_PCI 200000 /* 200W */ +#define MAX_POWER_DEFAULT_PMC 350000 /* 350W */ #define GAUDI_CPU_TIMEOUT_USEC 15000000 /* 15s */ diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c index 5673ee49819e6b2cd6ceee9c5dcef28716345cd7..881531d4d9da8540ecf32f4cca6f92f742c78a72 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c +++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c @@ -527,7 +527,7 @@ static int gaudi_config_etf(struct hl_device *hdev, } static bool gaudi_etr_validate_address(struct hl_device *hdev, u64 addr, - u32 size, bool *is_host) + u64 size, bool *is_host) { struct asic_fixed_properties *prop = &hdev->asic_prop; struct gaudi_device *gaudi = hdev->asic_specific; @@ -539,6 +539,12 @@ static bool gaudi_etr_validate_address(struct hl_device *hdev, u64 addr, return false; } + if (addr > (addr + size)) { + dev_err(hdev->dev, + "ETR buffer size %llu overflow\n", size); + return false; + } + /* PMMU and HPMMU addresses are equal, check only one of them */ if ((gaudi->hw_cap_initialized & HW_CAP_MMU) && hl_mem_area_inside_range(addr, size, diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 85030759b2afed75f71a48109760fa128f175f09..33cd2ae653d23441e1686d48d8df2aa05b1c80e7 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -139,6 +139,25 @@ static u16 goya_packet_sizes[MAX_PACKET_ID] = { [PACKET_STOP] = sizeof(struct 
packet_stop) }; +static inline bool validate_packet_id(enum packet_id id) +{ + switch (id) { + case PACKET_WREG_32: + case PACKET_WREG_BULK: + case PACKET_MSG_LONG: + case PACKET_MSG_SHORT: + case PACKET_CP_DMA: + case PACKET_MSG_PROT: + case PACKET_FENCE: + case PACKET_LIN_DMA: + case PACKET_NOP: + case PACKET_STOP: + return true; + default: + return false; + } +} + static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = { mmDMA_QM_0_GLBL_NON_SECURE_PROPS, mmDMA_QM_1_GLBL_NON_SECURE_PROPS, @@ -3455,6 +3474,12 @@ static int goya_validate_cb(struct hl_device *hdev, PACKET_HEADER_PACKET_ID_MASK) >> PACKET_HEADER_PACKET_ID_SHIFT); + if (!validate_packet_id(pkt_id)) { + dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); + rc = -EINVAL; + break; + } + pkt_size = goya_packet_sizes[pkt_id]; cb_parsed_length += pkt_size; if (cb_parsed_length > parser->user_cb_size) { @@ -3690,6 +3715,12 @@ static int goya_patch_cb(struct hl_device *hdev, PACKET_HEADER_PACKET_ID_MASK) >> PACKET_HEADER_PACKET_ID_SHIFT); + if (!validate_packet_id(pkt_id)) { + dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); + rc = -EINVAL; + break; + } + pkt_size = goya_packet_sizes[pkt_id]; cb_parsed_length += pkt_size; if (cb_parsed_length > parser->user_cb_size) { diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c index b03912483de0045ef0cb3b7b3bbf794dd82e7428..4027a6a334d7a61bb5df9e7a64912df6a4b60913 100644 --- a/drivers/misc/habanalabs/goya/goya_coresight.c +++ b/drivers/misc/habanalabs/goya/goya_coresight.c @@ -362,11 +362,17 @@ static int goya_config_etf(struct hl_device *hdev, } static int goya_etr_validate_address(struct hl_device *hdev, u64 addr, - u32 size) + u64 size) { struct asic_fixed_properties *prop = &hdev->asic_prop; u64 range_start, range_end; + if (addr > (addr + size)) { + dev_err(hdev->dev, + "ETR buffer size %llu overflow\n", size); + return false; + } + if (hdev->mmu_enable) { range_start = prop->dmmu.start_addr; range_end = prop->dmmu.end_addr; diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h index 13ef6b2887fd4246170c854c6562314eed633106..3510c42d24e3133624916e790d23ac5601bf628a 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h @@ -378,15 +378,15 @@ enum axi_id { ((((y) & RAZWI_INITIATOR_Y_MASK) << RAZWI_INITIATOR_Y_SHIFT) | \ (((x) & RAZWI_INITIATOR_X_MASK) << RAZWI_INITIATOR_X_SHIFT)) -#define RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0 RAZWI_INITIATOR_ID_X_Y(1, 0) -#define RAZWI_INITIATOR_ID_X_Y_TPC1 RAZWI_INITIATOR_ID_X_Y(2, 0) -#define RAZWI_INITIATOR_ID_X_Y_MME0_0 RAZWI_INITIATOR_ID_X_Y(3, 0) -#define RAZWI_INITIATOR_ID_X_Y_MME0_1 RAZWI_INITIATOR_ID_X_Y(4, 0) -#define RAZWI_INITIATOR_ID_X_Y_MME1_0 RAZWI_INITIATOR_ID_X_Y(5, 0) -#define RAZWI_INITIATOR_ID_X_Y_MME1_1 RAZWI_INITIATOR_ID_X_Y(6, 0) -#define RAZWI_INITIATOR_ID_X_Y_TPC2 RAZWI_INITIATOR_ID_X_Y(7, 0) +#define RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0 RAZWI_INITIATOR_ID_X_Y(1, 1) +#define RAZWI_INITIATOR_ID_X_Y_TPC1 RAZWI_INITIATOR_ID_X_Y(2, 1) +#define RAZWI_INITIATOR_ID_X_Y_MME0_0 RAZWI_INITIATOR_ID_X_Y(3, 1) +#define RAZWI_INITIATOR_ID_X_Y_MME0_1 RAZWI_INITIATOR_ID_X_Y(4, 1) +#define RAZWI_INITIATOR_ID_X_Y_MME1_0 RAZWI_INITIATOR_ID_X_Y(5, 1) +#define RAZWI_INITIATOR_ID_X_Y_MME1_1 RAZWI_INITIATOR_ID_X_Y(6, 1) +#define RAZWI_INITIATOR_ID_X_Y_TPC2 RAZWI_INITIATOR_ID_X_Y(7, 1) #define RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC \ - RAZWI_INITIATOR_ID_X_Y(8, 0) + 
RAZWI_INITIATOR_ID_X_Y(8, 1) #define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0 RAZWI_INITIATOR_ID_X_Y(0, 1) #define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0 RAZWI_INITIATOR_ID_X_Y(9, 1) #define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1 RAZWI_INITIATOR_ID_X_Y(0, 2) @@ -395,14 +395,14 @@ enum axi_id { #define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0 RAZWI_INITIATOR_ID_X_Y(9, 3) #define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1 RAZWI_INITIATOR_ID_X_Y(0, 4) #define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1 RAZWI_INITIATOR_ID_X_Y(9, 4) -#define RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2 RAZWI_INITIATOR_ID_X_Y(1, 5) -#define RAZWI_INITIATOR_ID_X_Y_TPC5 RAZWI_INITIATOR_ID_X_Y(2, 5) -#define RAZWI_INITIATOR_ID_X_Y_MME2_0 RAZWI_INITIATOR_ID_X_Y(3, 5) -#define RAZWI_INITIATOR_ID_X_Y_MME2_1 RAZWI_INITIATOR_ID_X_Y(4, 5) -#define RAZWI_INITIATOR_ID_X_Y_MME3_0 RAZWI_INITIATOR_ID_X_Y(5, 5) -#define RAZWI_INITIATOR_ID_X_Y_MME3_1 RAZWI_INITIATOR_ID_X_Y(6, 5) -#define RAZWI_INITIATOR_ID_X_Y_TPC6 RAZWI_INITIATOR_ID_X_Y(7, 5) -#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5 RAZWI_INITIATOR_ID_X_Y(8, 5) +#define RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2 RAZWI_INITIATOR_ID_X_Y(1, 6) +#define RAZWI_INITIATOR_ID_X_Y_TPC5 RAZWI_INITIATOR_ID_X_Y(2, 6) +#define RAZWI_INITIATOR_ID_X_Y_MME2_0 RAZWI_INITIATOR_ID_X_Y(3, 6) +#define RAZWI_INITIATOR_ID_X_Y_MME2_1 RAZWI_INITIATOR_ID_X_Y(4, 6) +#define RAZWI_INITIATOR_ID_X_Y_MME3_0 RAZWI_INITIATOR_ID_X_Y(5, 6) +#define RAZWI_INITIATOR_ID_X_Y_MME3_1 RAZWI_INITIATOR_ID_X_Y(6, 6) +#define RAZWI_INITIATOR_ID_X_Y_TPC6 RAZWI_INITIATOR_ID_X_Y(7, 6) +#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5 RAZWI_INITIATOR_ID_X_Y(8, 6) #define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1 diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c index d1d3e025ca0e8799e44242831233e5dc23a9c3a1..9ae9669e46eae92d12906d86bcf2ef80c75488aa 100644 --- a/drivers/misc/mei/hdcp/mei_hdcp.c +++ b/drivers/misc/mei/hdcp/mei_hdcp.c @@ -546,38 +546,46 @@ static int mei_hdcp_verify_mprime(struct device *dev, struct hdcp_port_data *data, struct hdcp2_rep_stream_ready *stream_ready) { - struct wired_cmd_repeater_auth_stream_req_in - verify_mprime_in = { { 0 } }; + struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in; struct wired_cmd_repeater_auth_stream_req_out verify_mprime_out = { { 0 } }; struct mei_cl_device *cldev; ssize_t byte; + size_t cmd_size; if (!dev || !stream_ready || !data) return -EINVAL; cldev = to_mei_cl_device(dev); - verify_mprime_in.header.api_version = HDCP_API_VERSION; - verify_mprime_in.header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ; - verify_mprime_in.header.status = ME_HDCP_STATUS_SUCCESS; - verify_mprime_in.header.buffer_len = + cmd_size = struct_size(verify_mprime_in, streams, data->k); + if (cmd_size == SIZE_MAX) + return -EINVAL; + + verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL); + if (!verify_mprime_in) + return -ENOMEM; + + verify_mprime_in->header.api_version = HDCP_API_VERSION; + verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ; + verify_mprime_in->header.status = ME_HDCP_STATUS_SUCCESS; + verify_mprime_in->header.buffer_len = WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN; - verify_mprime_in.port.integrated_port_type = data->port_type; - verify_mprime_in.port.physical_port = (u8)data->fw_ddi; - verify_mprime_in.port.attached_transcoder = (u8)data->fw_tc; + verify_mprime_in->port.integrated_port_type = data->port_type; + verify_mprime_in->port.physical_port = (u8)data->fw_ddi; + verify_mprime_in->port.attached_transcoder = (u8)data->fw_tc; + + 
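For reference, the allocation pattern this mei_hdcp hunk switches to, reduced to a minimal standalone sketch; the struct and variable names below are hypothetical, not the driver's. struct_size() from linux/overflow.h evaluates to sizeof(*msg) plus k element sizes and saturates to SIZE_MAX when the arithmetic overflows, so an oversized, caller-controlled stream count fails cleanly instead of producing an under-sized heap buffer:

	struct msg {
		struct hdr header;
		struct stream streams[];	/* flexible array member */
	};

	struct msg *msg;
	size_t n;

	n = struct_size(msg, streams, k);	/* sizeof(*msg) + k * sizeof(streams[0]) */
	if (n == SIZE_MAX)			/* size arithmetic saturated: k is too large */
		return -EINVAL;
	msg = kzalloc(n, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;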
memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN); + drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m); - memcpy(verify_mprime_in.m_prime, stream_ready->m_prime, - HDCP_2_2_MPRIME_LEN); - drm_hdcp_cpu_to_be24(verify_mprime_in.seq_num_m, data->seq_num_m); - memcpy(verify_mprime_in.streams, data->streams, + memcpy(verify_mprime_in->streams, data->streams, array_size(data->k, sizeof(*data->streams))); - verify_mprime_in.k = cpu_to_be16(data->k); + verify_mprime_in->k = cpu_to_be16(data->k); - byte = mei_cldev_send(cldev, (u8 *)&verify_mprime_in, - sizeof(verify_mprime_in)); + byte = mei_cldev_send(cldev, (u8 *)verify_mprime_in, cmd_size); + kfree(verify_mprime_in); if (byte < 0) { dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); return byte; diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c index 9cc6b2a6cf229cd34fa4661716f164cda69f7188..304d6c833712ae214006ce161f5bbc4f3021536f 100644 --- a/drivers/misc/mic/scif/scif_api.c +++ b/drivers/misc/mic/scif/scif_api.c @@ -178,7 +178,7 @@ int scif_close(scif_epd_t epd) case SCIFEP_ZOMBIE: dev_err(scif_info.mdev.this_device, "SCIFAPI close: zombie state unexpected\n"); - /* fall through */ + fallthrough; case SCIFEP_DISCONNECTED: spin_unlock(&ep->lock); scif_unregister_all_windows(epd); @@ -645,7 +645,7 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block) ep->port.port = err; ep->port.node = scif_info.nodeid; ep->conn_async_state = ASYNC_CONN_IDLE; - /* Fall through */ + fallthrough; case SCIFEP_BOUND: /* * If a non-blocking connect has been already initiated diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index de8f61efaef5cc292f9a519320c5fbefda64396d..2da3b474f4863d4e2e18d91e2072ce05a02b9f66 100644 --- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c @@ -657,7 +657,7 @@ int scif_unregister_window(struct scif_window *window) window->unreg_state = OP_IN_PROGRESS; send_msg = true; } - /* fall through */ + fallthrough; case OP_IN_PROGRESS: { scif_get_window(window, 1); diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c index f6e600bfac5d23b9f8c8c28d747436227af6fd38..0ea923fe6371b6b9e88b6b8e59e23b46df13774d 100644 --- a/drivers/misc/sgi-gru/grukservices.c +++ b/drivers/misc/sgi-gru/grukservices.c @@ -622,7 +622,7 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd, break; case CBSS_PAGE_OVERFLOW: STAT(mesq_noop_page_overflow); - /* fall through */ + fallthrough; default: BUG(); } @@ -780,7 +780,7 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd, break; case CBSS_PAGE_OVERFLOW: STAT(mesq_page_overflow); - /* fall through */ + fallthrough; default: BUG(); } diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index d5e097cd556dbc60cee63277c4650e06d165dc73..8a495dc82f167cd85afd46ffe77b9937464a208f 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -1173,7 +1173,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) if (!xpc_kdebug_ignore) break; - /* fall through */ + fallthrough; case DIE_MCA_MONARCH_ENTER: case DIE_INIT_MONARCH_ENTER: xpc_arch_ops.offline_heartbeat(); @@ -1184,7 +1184,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) if (!xpc_kdebug_ignore) break; - /* fall through */ + fallthrough; case DIE_MCA_MONARCH_LEAVE: case DIE_INIT_MONARCH_LEAVE: 
xpc_arch_ops.online_heartbeat(); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 21a04bc97d40ae593640371e47845a4d5e25ef8e..099a53bdbb7d84a1d2fcf697a0b23d1b9b5e9b95 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -441,10 +441,10 @@ xpc_discovery(void) switch (region_size) { case 128: max_regions *= 2; - /* fall through */ + fallthrough; case 64: max_regions *= 2; - /* fall through */ + fallthrough; case 32: max_regions *= 2; region_size = 16; diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 98c60f11b76b1046436e7917edc3c83ad56e8474..7791bde81a3684c9dbd8f65284c97583783187a9 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -574,7 +574,7 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, xpc_wakeup_channel_mgr(part); } - /* fall through */ + fallthrough; case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV: spin_lock_irqsave(&part_uv->flags_lock, irq_flags); part_uv->flags |= XPC_P_ENGAGED_UV; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index ce43f7573d80464a5b3af9e2b1795ea109447257..c8fae6611b73bda3a6cd665339021cad2d719fd8 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -191,7 +191,7 @@ int mmc_of_parse(struct mmc_host *host) switch (bus_width) { case 8: host->caps |= MMC_CAP_8_BIT_DATA; - /* fall through - Hosts capable of 8-bit can also do 4 bits */ + fallthrough; /* Hosts capable of 8-bit can also do 4 bits */ case 4: host->caps |= MMC_CAP_4_BIT_DATA; break; diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c index 93d346c01110df06ebb63191004c51f00b21c213..4c229dd2b6e543d2318d83888e8a9d1798335dd3 100644 --- a/drivers/mmc/core/sdio_ops.c +++ b/drivers/mmc/core/sdio_ops.c @@ -121,6 +121,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, struct sg_table sgtable; unsigned int nents, left_size, i; unsigned int seg_size = card->host->max_seg_size; + int err; WARN_ON(blksz == 0); @@ -170,28 +171,32 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, mmc_set_data_timeout(&data, card); - mmc_wait_for_req(card->host, &mrq); + mmc_pre_req(card->host, &mrq); - if (nents > 1) - sg_free_table(&sgtable); + mmc_wait_for_req(card->host, &mrq); if (cmd.error) - return cmd.error; - if (data.error) - return data.error; - - if (mmc_host_is_spi(card->host)) { + err = cmd.error; + else if (data.error) + err = data.error; + else if (mmc_host_is_spi(card->host)) /* host driver already reported errors */ - } else { - if (cmd.resp[0] & R5_ERROR) - return -EIO; - if (cmd.resp[0] & R5_FUNCTION_NUMBER) - return -EINVAL; - if (cmd.resp[0] & R5_OUT_OF_RANGE) - return -ERANGE; - } + err = 0; + else if (cmd.resp[0] & R5_ERROR) + err = -EIO; + else if (cmd.resp[0] & R5_FUNCTION_NUMBER) + err = -EINVAL; + else if (cmd.resp[0] & R5_OUT_OF_RANGE) + err = -ERANGE; + else + err = 0; - return 0; + mmc_post_req(card->host, &mrq, err); + + if (nents > 1) + sg_free_table(&sgtable); + + return err; } int sdio_reset(struct mmc_host *host) diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 9c89a5b780e85f0b0c8a4a70cdb68ca0e0d4faf2..9a34c827c96ef7ae1cb79896c773c0091270e519 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -602,7 +602,7 @@ config MMC_GOLDFISH config MMC_SPI tristate "MMC/SD/SDIO over SPI" - depends on SPI_MASTER && HAS_DMA + depends on SPI_MASTER select CRC7 select CRC_ITU_T help diff --git a/drivers/mmc/host/atmel-mci.c 
b/drivers/mmc/host/atmel-mci.c index 300901415aa20b71781a1411e8eaada05709a7b8..3fc3bbea8536e0470a080ddde11db4817656363f 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -2418,7 +2418,7 @@ static void atmci_get_cap(struct atmel_mci *host) case 0x600: case 0x500: host->caps.has_odd_clk_div = 1; - /* Fall through */ + fallthrough; case 0x400: case 0x300: host->caps.has_dma_conf_reg = 1; @@ -2426,16 +2426,16 @@ static void atmci_get_cap(struct atmel_mci *host) host->caps.has_cfg_reg = 1; host->caps.has_cstor_reg = 1; host->caps.has_highspeed = 1; - /* Fall through */ + fallthrough; case 0x200: host->caps.has_rwproof = 1; host->caps.need_blksz_mul_4 = 0; host->caps.need_notbusy_for_read_ops = 1; - /* Fall through */ + fallthrough; case 0x100: host->caps.has_bad_data_ordering = 0; host->caps.need_reset_after_xfer = 0; - /* Fall through */ + fallthrough; case 0x0: break; default: diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index f01fecd75833d8a9f05fee959ce7a39cf290f35e..e50a08bce7efa19e316db2a0150b48b7933ff15c 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -300,7 +300,7 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host, * then it's harmless for us to allow it. */ cmd_reg |= MMCCMD_BSYEXP; - /* FALLTHROUGH */ + fallthrough; case MMC_RSP_R1: /* 48 bits, CRC */ cmd_reg |= MMCCMD_RSPFMT_R1456; break; diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c index 50977ff18074175d5832f8d66fa9499da38daa09..db1a84b2ba612f3f1ca8be7b9bedd222d516eef4 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ b/drivers/mmc/host/dw_mmc-k3.c @@ -238,7 +238,7 @@ static void dw_mci_hs_set_timing(struct dw_mci *host, int timing, if (smpl_phase >= USE_DLY_MIN_SMPL && smpl_phase <= USE_DLY_MAX_SMPL) use_smpl_dly = 1; - /* fallthrough */ + fallthrough; case MMC_TIMING_UHS_SDR50: if (smpl_phase >= ENABLE_SHIFT_MIN_SMPL && smpl_phase <= ENABLE_SHIFT_MAX_SMPL) diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 35ae5737c62267f98fffa78126dce04f2d6b2ccf..0fba940544ca122683c427e5b36765ab7f5b3381 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2030,7 +2030,7 @@ static void dw_mci_tasklet_func(unsigned long priv) } prev_state = state = STATE_SENDING_DATA; - /* fall through */ + fallthrough; case STATE_SENDING_DATA: /* @@ -2088,7 +2088,7 @@ static void dw_mci_tasklet_func(unsigned long priv) } prev_state = state = STATE_DATA_BUSY; - /* fall through */ + fallthrough; case STATE_DATA_BUSY: if (!dw_mci_clear_pending_data_complete(host)) { @@ -2141,7 +2141,7 @@ static void dw_mci_tasklet_func(unsigned long priv) */ prev_state = state = STATE_SENDING_STOP; - /* fall through */ + fallthrough; case STATE_SENDING_STOP: if (!dw_mci_clear_pending_cmd_complete(host)) diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 447552ac25c4a34386e815390d7fac66b418d99a..81d71010b4742d6b8bfdf6287471903428a714e8 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -739,7 +739,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) break; jz_mmc_prepare_data_transfer(host); - /* fall through */ + fallthrough; case JZ4740_MMC_STATE_TRANSFER_DATA: if (host->use_dma) { @@ -774,7 +774,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) break; } jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE); - /* fall through */ + fallthrough; case JZ4740_MMC_STATE_SEND_STOP: if (!req->stop) diff --git 
a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 9b2cf7afc246f9472eb46ae301c5401271cf693c..703d5834f9a526b255adb4bd72c331ef2bd4b166 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -294,7 +294,7 @@ static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_OFF: vdd = 0; - /* fall through */ + fallthrough; case MMC_POWER_UP: if (!IS_ERR(mmc->supply.vmmc)) { host->error = mmc_regulator_set_ocr(mmc, diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 39bb1e30c2d7223602f9d5145f36a6fce1519f6c..5055a7eb134ac13f25e57064e5779340961c8927 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1278,6 +1278,52 @@ mmc_spi_detect_irq(int irq, void *mmc) return IRQ_HANDLED; } +#ifdef CONFIG_HAS_DMA +static int mmc_spi_dma_alloc(struct mmc_spi_host *host) +{ + struct spi_device *spi = host->spi; + struct device *dev; + + if (!spi->master->dev.parent->dma_mask) + return 0; + + dev = spi->master->dev.parent; + + host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, host->ones_dma)) + return -ENOMEM; + + host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data), + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, host->data_dma)) { + dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE, + DMA_TO_DEVICE); + return -ENOMEM; + } + + dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data), + DMA_BIDIRECTIONAL); + + host->dma_dev = dev; + return 0; +} + +static void mmc_spi_dma_free(struct mmc_spi_host *host) +{ + if (!host->dma_dev) + return; + + dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE, + DMA_TO_DEVICE); + dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data), + DMA_BIDIRECTIONAL); +} +#else +static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; } +static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {} +#endif + static int mmc_spi_probe(struct spi_device *spi) { void *ones; @@ -1374,23 +1420,9 @@ static int mmc_spi_probe(struct spi_device *spi) if (!host->data) goto fail_nobuf1; - if (spi->master->dev.parent->dma_mask) { - struct device *dev = spi->master->dev.parent; - - host->dma_dev = dev; - host->ones_dma = dma_map_single(dev, ones, - MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); - if (dma_mapping_error(dev, host->ones_dma)) - goto fail_ones_dma; - host->data_dma = dma_map_single(dev, host->data, - sizeof(*host->data), DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, host->data_dma)) - goto fail_data_dma; - - dma_sync_single_for_cpu(host->dma_dev, - host->data_dma, sizeof(*host->data), - DMA_BIDIRECTIONAL); - } + status = mmc_spi_dma_alloc(host); + if (status) + goto fail_dma; /* setup message for status/busy readback */ spi_message_init(&host->readback); @@ -1458,20 +1490,12 @@ static int mmc_spi_probe(struct spi_device *spi) fail_add_host: mmc_remove_host(mmc); fail_glue_init: - if (host->dma_dev) - dma_unmap_single(host->dma_dev, host->data_dma, - sizeof(*host->data), DMA_BIDIRECTIONAL); -fail_data_dma: - if (host->dma_dev) - dma_unmap_single(host->dma_dev, host->ones_dma, - MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); -fail_ones_dma: + mmc_spi_dma_free(host); +fail_dma: kfree(host->data); - fail_nobuf1: mmc_free_host(mmc); mmc_spi_put_pdata(spi); - nomem: kfree(ones); return status; @@ -1489,13 +1513,7 @@ static int mmc_spi_remove(struct spi_device *spi) mmc_remove_host(mmc); - if (host->dma_dev) { -
dma_unmap_single(host->dma_dev, host->ones_dma, - MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); - dma_unmap_single(host->dma_dev, host->data_dma, - sizeof(*host->data), DMA_BIDIRECTIONAL); - } - + mmc_spi_dma_free(host); kfree(host->data); kfree(host->ones); diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 4e2583f69a631a01c6cb9677cfed3118b1ccc900..b0c27944db7f72913845edb3e43527382df7636d 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -419,6 +420,7 @@ struct msdc_host { struct pinctrl_state *pins_uhs; struct delayed_work req_timeout; int irq; /* host interrupt */ + struct reset_control *reset; struct clk *src_clk; /* msdc source clock */ struct clk *h_clk; /* msdc h_clk */ @@ -1592,6 +1594,12 @@ static void msdc_init_hw(struct msdc_host *host) u32 val; u32 tune_reg = host->dev_comp->pad_tune_reg; + if (host->reset) { + reset_control_assert(host->reset); + usleep_range(10, 50); + reset_control_deassert(host->reset); + } + /* Configure to MMC/SD mode, clock free running */ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN); @@ -2390,6 +2398,11 @@ static int msdc_drv_probe(struct platform_device *pdev) if (IS_ERR(host->src_clk_cg)) host->src_clk_cg = NULL; + host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev, + "hrst"); + if (IS_ERR(host->reset)) + return PTR_ERR(host->reset); + host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { ret = -EINVAL; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 15e21894bd44a84ffacd77d1c416e552ed0b99ec..904f5237d8f7e4982a2095a5b9cea76837e3ace7 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -685,7 +685,7 @@ static int renesas_sdhi_write16_hook(struct tmio_mmc_host *host, int addr) case HOST_MODE: if (host->pdata->flags & TMIO_MMC_HAVE_CBSY) bit = TMIO_STAT_CMD_BUSY; - /* fallthrough */ + fallthrough; case CTL_SD_CARD_CLK_CTL: return renesas_sdhi_wait_idle(host, bit); } diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 48ecbd0b180d87f213f4655711afb500f18b2b15..284cba11e27952aa9fd241f3bd59e548fcc7398f 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -535,6 +535,11 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = { .caps = MMC_CAP_NONREMOVABLE, }; +struct amd_sdhci_host { + bool tuned_clock; + bool dll_enabled; +}; + /* AMD sdhci reset dll register. */ #define SDHCI_AMD_RESET_DLL_REGISTER 0x908 @@ -546,39 +551,96 @@ static int amd_select_drive_strength(struct mmc_card *card, return MMC_SET_DRIVER_TYPE_A; } -static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host) +static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable) { + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + /* AMD Platform requires dll setting */ sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER); usleep_range(10, 20); - sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER); + if (enable) + sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER); + + amd_host->dll_enabled = enable; } /* - * For AMD Platform it is required to disable the tuning - * bit first controller to bring to HS Mode from HS200 - * mode, later enable to tune to HS400 mode. 
+ * The initialization sequence for HS400 is: + * HS->HS200->Perform Tuning->HS->HS400 + * + * The re-tuning sequence is: + * HS400->DDR52->HS->HS200->Perform Tuning->HS->HS400 + * + * The AMD eMMC Controller can only use the tuned clock while in HS200 and HS400 + * mode. If we switch to a different mode, we need to disable the tuned clock. + * If we have previously performed tuning and switch back to HS200 or + * HS400, we can re-enable the tuned clock. + * */ static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); unsigned int old_timing = host->timing; + u16 val; sdhci_set_ios(mmc, ios); - if (old_timing == MMC_TIMING_MMC_HS200 && - ios->timing == MMC_TIMING_MMC_HS) - sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2); - if (old_timing != MMC_TIMING_MMC_HS400 && - ios->timing == MMC_TIMING_MMC_HS400) { - sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2); - sdhci_acpi_amd_hs400_dll(host); + + if (old_timing != host->timing && amd_host->tuned_clock) { + if (host->timing == MMC_TIMING_MMC_HS400 || + host->timing == MMC_TIMING_MMC_HS200) { + val = sdhci_readw(host, SDHCI_HOST_CONTROL2); + val |= SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, val, SDHCI_HOST_CONTROL2); + } else { + val = sdhci_readw(host, SDHCI_HOST_CONTROL2); + val &= ~SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, val, SDHCI_HOST_CONTROL2); + } + + /* DLL is only required for HS400 */ + if (host->timing == MMC_TIMING_MMC_HS400 && + !amd_host->dll_enabled) + sdhci_acpi_amd_hs400_dll(host, true); + } +} + +static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + int err; + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + + amd_host->tuned_clock = false; + + err = sdhci_execute_tuning(mmc, opcode); + + if (!err && !host->tuning_err) + amd_host->tuned_clock = true; + + return err; +} + +static void amd_sdhci_reset(struct sdhci_host *host, u8 mask) +{ + struct sdhci_acpi_host *acpi_host = sdhci_priv(host); + struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); + + if (mask & SDHCI_RESET_ALL) { + amd_host->tuned_clock = false; + sdhci_acpi_amd_hs400_dll(host, false); } + + sdhci_reset(host, mask); } static const struct sdhci_ops sdhci_acpi_ops_amd = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, - .reset = sdhci_reset, + .reset = amd_sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; @@ -602,6 +664,7 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev, host->mmc_host_ops.select_drive_strength = amd_select_drive_strength; host->mmc_host_ops.set_ios = amd_set_ios; + host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning; return 0; } @@ -613,6 +676,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = { SDHCI_QUIRK_32BIT_ADMA_SIZE, .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA, .probe_slot = sdhci_acpi_emmc_amd_probe_slot, + .priv_size = sizeof(struct amd_sdhci_host), }; struct sdhci_acpi_uid_slot { diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index a76b4513fbec6b7a6e87a444c275ca19345cb284..d738907a622f7ad01aecd9bafd9cd628d345098c 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1556,7 +1556,7 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, "failed to request 
card-detect gpio!\n"); return err; } - /* fall through */ + fallthrough; case ESDHC_CD_CONTROLLER: /* we have a working card_detect back */ diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 5a33389037cd0c2b7fef1cf61fc6d80909c32c5b..729868abd2dba42a71debbf59e594182137fe119 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1166,7 +1166,7 @@ static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable) static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); - int tuning_seq_cnt = 3; + int tuning_seq_cnt = 10; u8 phase, tuned_phases[16], tuned_phase_cnt = 0; int rc; struct mmc_ios ios = host->mmc->ios; @@ -1222,6 +1222,22 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) } while (++phase < ARRAY_SIZE(tuned_phases)); if (tuned_phase_cnt) { + if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) { + /* + * All phases valid is _almost_ as bad as no phases + * valid. Probably all phases are not really reliable + * but we didn't detect where the unreliable place is. + * That means we'll essentially be guessing and hoping + * we get a good phase. Better to try a few times. + */ + dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n", + mmc_hostname(mmc)); + if (--tuning_seq_cnt) { + tuned_phase_cnt = 0; + goto retry; + } + } + rc = msm_find_most_appropriate_phase(host, tuned_phases, tuned_phase_cnt); if (rc < 0) diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 7c73d243dc6ceeb0a1a3b632d2905157372a0793..45881b3099567ded18e0c13f728150153eb9bf6e 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -81,6 +81,7 @@ struct sdhci_esdhc { bool quirk_tuning_erratum_type2; bool quirk_ignore_data_inhibit; bool quirk_delay_before_data_reset; + bool quirk_trans_complete_erratum; bool in_sw_tuning; unsigned int peripheral_clock; const struct esdhc_clk_fixup *clk_fixup; @@ -1177,10 +1178,11 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, static u32 esdhc_irq(struct sdhci_host *host, u32 intmask) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); u32 command; - if (of_find_compatible_node(NULL, NULL, - "fsl,p2020-esdhc")) { + if (esdhc->quirk_trans_complete_erratum) { command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); if (command == MMC_WRITE_MULTIPLE_BLOCK && @@ -1334,8 +1336,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) esdhc->clk_fixup = match->data; np = pdev->dev.of_node; - if (of_device_is_compatible(np, "fsl,p2020-esdhc")) + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { esdhc->quirk_delay_before_data_reset = true; + esdhc->quirk_trans_complete_erratum = true; + } clk = of_clk_get(np, 0); if (!IS_ERR(clk)) { diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index bb6802448b2f4a9573f083da896a85aa0d225010..af413805bbf1a23899c016a1dba8c09c744ea8cb 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -232,6 +232,14 @@ static void sdhci_pci_dumpregs(struct mmc_host *mmc) sdhci_dumpregs(mmc_priv(mmc)); } +static void sdhci_cqhci_reset(struct sdhci_host *host, u8 mask) +{ + if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) && + host->mmc->cqe_private) + cqhci_deactivate(host->mmc); + sdhci_reset(host, mask); +} + 
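The sdhci-msm hunk above carries a policy worth seeing in isolation: a tuning sweep in which every phase passes is treated as uninformative, because it never located the bad region, so the sweep is repeated a bounded number of times before settling for a guess. A compact, hypothetical rendering of that policy as a standalone helper (not the driver's code), assuming at most 16 candidate phases:

	/* Sweep all phases; trust the result only if it excluded something. */
	static int pick_tuned_phase(int (*try_phase)(u8 phase), u8 nphases)
	{
		int attempts = 10;	/* mirrors the raised tuning_seq_cnt */
		u8 phase, good[16], ngood = 0;

		while (attempts--) {
			ngood = 0;
			for (phase = 0; phase < nphases; phase++)
				if (!try_phase(phase))
					good[ngood++] = phase;

			/* Some phases failed: the passing window is real. */
			if (ngood && ngood < nphases)
				return good[ngood / 2];	/* middle of the window */
		}

		/* Retries exhausted: all phases passed, settle for a guess. */
		return ngood ? good[ngood / 2] : -EIO;
	}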
/*****************************************************************************\ * * * Hardware specific quirk handling * @@ -718,7 +726,7 @@ static const struct sdhci_ops sdhci_intel_glk_ops = { .set_power = sdhci_intel_set_power, .enable_dma = sdhci_pci_enable_dma, .set_bus_width = sdhci_set_bus_width, - .reset = sdhci_reset, + .reset = sdhci_cqhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, .irq = sdhci_cqhci_irq, diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 9194bb73e601b73828eef81f72877fe98b140e89..080ced1e63f0ab073a247d9f15017dd22cfe76fd 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -609,7 +609,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev) switch (pdata->max_width) { case 8: host->mmc->caps |= MMC_CAP_8_BIT_DATA; - /* Fall through */ + fallthrough; case 4: host->mmc->caps |= MMC_CAP_4_BIT_DATA; break; diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index a910cb461ed7c1c280027e94bdb51b3a1e3b9caf..bafa2e41c8b6366e77be96a3e612b9a09e7d78fc 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -470,7 +470,7 @@ static int sdhci_sprd_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) break; default: - /* fall-through */ + fallthrough; case MMC_SIGNAL_VOLTAGE_330: ret = pinctrl_select_state(sprd_host->pinctrl, sprd_host->pins_default); diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 0a3f9d024f2ab58f86edf2b7dc78743bb6c1e59d..13fbf70b5fde37478c26a2cdd90d08795f56f0a1 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -110,6 +110,12 @@ #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8) #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9) +/* + * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra + * SDMMC hardware data timeout. 
+ */ +#define NVQUIRK_HAS_TMCLK BIT(10) + /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */ #define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000 @@ -140,6 +146,7 @@ struct sdhci_tegra_autocal_offsets { struct sdhci_tegra { const struct sdhci_tegra_soc_data *soc_data; struct gpio_desc *power_gpio; + struct clk *tmclk; bool ddr_signaling; bool pad_calib_required; bool pad_control_available; @@ -1418,7 +1425,6 @@ static const struct sdhci_ops tegra210_sdhci_ops = { static const struct sdhci_pltfm_data sdhci_tegra210_pdata = { .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | - SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | @@ -1434,7 +1440,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = { NVQUIRK_HAS_PADCALIB | NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | NVQUIRK_ENABLE_SDR50 | - NVQUIRK_ENABLE_SDR104, + NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_TMCLK, .min_tap_delay = 106, .max_tap_delay = 185, }; @@ -1456,7 +1463,6 @@ static const struct sdhci_ops tegra186_sdhci_ops = { static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | - SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | @@ -1473,6 +1479,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra186 = { NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | NVQUIRK_ENABLE_SDR50 | NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_TMCLK | NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING, .min_tap_delay = 84, .max_tap_delay = 136, @@ -1485,7 +1492,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra194 = { NVQUIRK_HAS_PADCALIB | NVQUIRK_DIS_CARD_CLK_CONFIG_TAP | NVQUIRK_ENABLE_SDR50 | - NVQUIRK_ENABLE_SDR104, + NVQUIRK_ENABLE_SDR104 | + NVQUIRK_HAS_TMCLK, .min_tap_delay = 96, .max_tap_delay = 139, }; @@ -1613,6 +1621,43 @@ static int sdhci_tegra_probe(struct platform_device *pdev) goto err_power_req; } + /* + * Tegra210 has a separate SDMMC_LEGACY_TM clock that serves as the + * host timeout clock; SW can choose between TMCLK and SDCLK for the + * hardware data timeout via the USE_TMCLK_FOR_DATA_TIMEOUT bit of + * the SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register. + * + * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, so SDMMC uses the + * 12 MHz TMCLK advertised in the host capability register. + * A 12 MHz TMCLK yields a maximum data timeout period of 11 s, which + * is better than what SDCLK can achieve. + * + * So TMCLK is set to 12 MHz and kept enabled at all times on SoCs + * that support a separate TMCLK.
+ */ + + if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) { + clk = devm_clk_get(&pdev->dev, "tmclk"); + if (IS_ERR(clk)) { + rc = PTR_ERR(clk); + if (rc == -EPROBE_DEFER) + goto err_power_req; + + dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc); + clk = NULL; + } + + clk_set_rate(clk, 12000000); + rc = clk_prepare_enable(clk); + if (rc) { + dev_err(&pdev->dev, + "failed to enable tmclk: %d\n", rc); + goto err_power_req; + } + + tegra_host->tmclk = clk; + } + clk = devm_clk_get(mmc_dev(host->mmc), NULL); if (IS_ERR(clk)) { rc = PTR_ERR(clk); @@ -1656,6 +1701,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev) err_rst_get: clk_disable_unprepare(pltfm_host->clk); err_clk_get: + clk_disable_unprepare(tegra_host->tmclk); err_power_req: err_parse_dt: sdhci_pltfm_free(pdev); @@ -1673,6 +1719,7 @@ static int sdhci_tegra_remove(struct platform_device *pdev) reset_control_assert(tegra_host->rst); usleep_range(2000, 4000); clk_disable_unprepare(pltfm_host->clk); + clk_disable_unprepare(tegra_host->tmclk); sdhci_pltfm_free(pdev); diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c index e6e9e286cc348932dba57186d4c8efa95320c729..03ce57ef4585886c138e64e2e5b8d5b605ce1aa9 100644 --- a/drivers/mmc/host/sdhci-xenon-phy.c +++ b/drivers/mmc/host/sdhci-xenon-phy.c @@ -527,7 +527,7 @@ static bool xenon_emmc_phy_slow_mode(struct sdhci_host *host, ret = true; break; } - /* fall through */ + fallthrough; default: reg &= ~XENON_TIMING_ADJUST_SLOW_MODE; ret = false; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 3ad394b40eb18cf8a59dca6f5a6c639d32a7c421..592a55a34b58eba8163d5277faecc880460f76f7 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2825,7 +2825,7 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) case MMC_TIMING_UHS_SDR50: if (host->flags & SDHCI_SDR50_NEEDS_TUNING) break; - /* FALLTHROUGH */ + fallthrough; default: goto out; diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 5987656e0474cd262234dbbb0b85b624a039513a..fd8b72d3e02c9a36c55bc40a647d2dbe8e415158 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -335,7 +335,7 @@ static unsigned int tifm_sd_op_flags(struct mmc_command *cmd) break; case MMC_RSP_R1B: rc |= TIFM_MMCSD_RSP_BUSY; - /* fall-through */ + fallthrough; case MMC_RSP_R1: rc |= TIFM_MMCSD_RSP_R1; break; diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index 369b8dee2e3d3f44dadf4379f44fee0557801f61..7666c90054ae937d8de19831271477afa43ae50f 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1343,7 +1343,7 @@ static int usdhi6_stop_cmd(struct usdhi6_host *host) host->wait = USDHI6_WAIT_FOR_STOP; return 0; } - /* fall through - Unsupported STOP command. */ + fallthrough; /* Unsupported STOP command */ default: dev_err(mmc_dev(host->mmc), "unsupported stop CMD%d for CMD%d\n", @@ -1691,7 +1691,7 @@ static void usdhi6_timeout_work(struct work_struct *work) switch (host->wait) { default: dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); - /* fall through - mrq can be NULL, but is impossible. */ + fallthrough; /* mrq can be NULL, but is impossible */ case USDHI6_WAIT_FOR_CMD: usdhi6_error_code(host); if (mrq) @@ -1713,7 +1713,7 @@ static void usdhi6_timeout_work(struct work_struct *work) host->offset, data->blocks, data->blksz, data->sg_len, sg_dma_len(sg), sg->offset); usdhi6_sg_unmap(host, true); - /* fall through - page unmapped in USDHI6_WAIT_FOR_DATA_END. 
*/ + fallthrough; /* page unmapped in USDHI6_WAIT_FOR_DATA_END */ case USDHI6_WAIT_FOR_DATA_END: usdhi6_error_code(host); data->error = -ETIMEDOUT; diff --git a/drivers/mux/adgs1408.c b/drivers/mux/adgs1408.c index 12466b06692c83d3b3eb0a5150b753e54a3140ef..22ed051eb1a4a5c89f3ad1fa8cd46d576ab24564 100644 --- a/drivers/mux/adgs1408.c +++ b/drivers/mux/adgs1408.c @@ -93,7 +93,7 @@ static int adgs1408_probe(struct spi_device *spi) mux->idle_state = idle_state; break; } - /* fall through */ + fallthrough; default: dev_err(dev, "invalid idle-state %d\n", idle_state); return -EINVAL; diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 18428e104445f25198325b89025067fd65cbb11b..1c6c27f35ac4bc95d9ab1b635c77d1b77a0ec936 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -301,7 +301,7 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) dev->irq = cops_irq(ioaddr, board); if (dev->irq) break; - /* fall through - Once no IRQ found on this port. */ + fallthrough; /* Once no IRQ found on this port */ case 1: retval = -EINVAL; goto err_out; diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 14a5fb3781453526914bdab493c9b68beaa596b3..98df38fe553ced05b714767d4ad628dcba45819a 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -363,13 +363,13 @@ static int __init arcrimi_setup(char *s) switch (ints[0]) { default: /* ERROR */ pr_err("Too many arguments\n"); - /* Fall through */ + fallthrough; case 3: /* Node ID */ node = ints[3]; - /* Fall through */ + fallthrough; case 2: /* IRQ */ irq = ints[2]; - /* Fall through */ + fallthrough; case 1: /* IO address */ io = ints[1]; } diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index cd27fdc1059b908900fe72d7e23503750dface0a..f983c4ce6b07f186fd2b6cc136df9eabfa7c8df4 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -197,22 +197,22 @@ static int __init com20020isa_setup(char *s) switch (ints[0]) { default: /* ERROR */ pr_info("Too many arguments\n"); - /* Fall through */ + fallthrough; case 6: /* Timeout */ timeout = ints[6]; - /* Fall through */ + fallthrough; case 5: /* CKP value */ clockp = ints[5]; - /* Fall through */ + fallthrough; case 4: /* Backplane flag */ backplane = ints[4]; - /* Fall through */ + fallthrough; case 3: /* Node ID */ node = ints[3]; - /* Fall through */ + fallthrough; case 2: /* IRQ */ irq = ints[2]; - /* Fall through */ + fallthrough; case 1: /* IO address */ io = ints[1]; } diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index 186bbf87bc849f1e3e80180068412de2a5ff1606..cf214b7306715487b84cd2301fca4de1bada6f48 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -363,10 +363,10 @@ static int __init com90io_setup(char *s) switch (ints[0]) { default: /* ERROR */ pr_err("Too many arguments\n"); - /* Fall through */ + fallthrough; case 2: /* IRQ */ irq = ints[2]; - /* Fall through */ + fallthrough; case 1: /* IO address */ io = ints[1]; } diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index bd75d06ad7dfc98c982a3c7f033508a8e398d3ae..3dc3d533cb19a258e72f57d46443b9b52d1b8df7 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -693,13 +693,13 @@ static int __init com90xx_setup(char *s) switch (ints[0]) { default: /* ERROR */ pr_err("Too many arguments\n"); - /* Fall through */ + fallthrough; case 3: /* Mem address */ shmem = ints[3]; - /* Fall 
through */ + fallthrough; case 2: /* IRQ */ irq = ints[2]; - /* Fall through */ + fallthrough; case 1: /* IO address */ io = ints[1]; } diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 31e43a2197a30367b1290514ffc1e027fb8b45a8..aa001b16765ae152e666fb1fa0d2ebd4ef8784e6 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -130,7 +130,7 @@ static inline struct bonding *__get_bond_by_port(struct port *port) /** * __get_first_agg - get the first aggregator in the bond - * @bond: the bond we're looking at + * @port: the port we're looking at * * Return the aggregator of the first slave in @bond, or %NULL if it can't be * found. @@ -1149,7 +1149,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) port->actor_oper_port_state &= ~LACP_STATE_EXPIRED; port->sm_rx_state = AD_RX_PORT_DISABLED; - /* Fall Through */ + fallthrough; case AD_RX_PORT_DISABLED: port->sm_vars &= ~AD_PORT_MATCHED; break; @@ -1588,7 +1588,7 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best, if (__agg_active_ports(curr) < __agg_active_ports(best)) return best; - /*FALLTHROUGH*/ + fallthrough; case BOND_AD_STABLE: case BOND_AD_BANDWIDTH: if (__get_agg_bandwidth(curr) > __get_agg_bandwidth(best)) @@ -1626,7 +1626,7 @@ static int agg_device_up(const struct aggregator *agg) /** * ad_agg_selection_logic - select an aggregation group for a team - * @aggregator: the aggregator we're looking at + * @agg: the aggregator we're looking at * @update_slave_arr: Does slave array need update? * * It is assumed that only one aggregator may be selected for a team. @@ -1810,7 +1810,7 @@ static void ad_initialize_agg(struct aggregator *aggregator) /** * ad_initialize_port - initialize a given port's parameters - * @aggregator: the aggregator we're looking at + * @port: the port we're looking at * @lacp_fast: boolean. whether fast periodic should be used */ static void ad_initialize_port(struct port *port, int lacp_fast) @@ -1967,6 +1967,7 @@ static void ad_marker_response_received(struct bond_marker *marker, /** * bond_3ad_initiate_agg_selection - initate aggregator selection * @bond: bonding struct + * @timeout: timeout value to set * * Set the aggregation selection timer, to initiate an agg selection in * the very near future. Called during first initialization, and during @@ -2259,7 +2260,7 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond) /** * bond_3ad_state_machine_handler - handle state machines timeout - * @bond: bonding struct to work on + * @work: work context to fetch bonding struct to work on from * * The state machine handling concept in this module is to check every tick * which state machine should operate any function. The execution order is @@ -2500,7 +2501,7 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave) /** * bond_3ad_handle_link_change - handle a slave's link status change indication * @slave: slave struct to work on - * @status: whether the link is now up or down + * @link: whether the link is now up or down * * Handle reselection of aggregator (if needed) for this port. */ @@ -2551,7 +2552,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) /** * bond_3ad_set_carrier - set link state for bonding master - * @bond - bonding structure + * @bond: bonding structure * * if we have an active aggregator, we're up, if not, we're down. 
* Presumes that we cannot have an active aggregator if there are @@ -2664,7 +2665,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, /** * bond_3ad_update_lacp_rate - change the lacp rate - * @bond - bonding struct + * @bond: bonding struct * * When modify lacp_rate parameter via sysfs, * update actor_oper_port_state of each port. diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 095ea51d185393ba6900c5724d317de27043cd63..4e1b7deb724b4aa897030d5db6cfc142810bbc0a 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1206,8 +1206,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav /** * alb_set_mac_address - * @bond: - * @addr: + * @bond: bonding we're working on + * @addr: MAC address to set * * In TLB mode all slaves are configured to the bond's hw address, but set * their dev_addr field to different addresses (based on their permanent hw diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5ad43aaf76e56a8f069974925082f58cf160c9ae..42ef25ec0af5b089365c5a0b0378c40c22db1afe 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -322,6 +322,7 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, /** * bond_vlan_rx_add_vid - Propagates adding an id to slaves * @bond_dev: bonding net device that got called + * @proto: network protocol ID * @vid: vlan id being added */ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, @@ -355,6 +356,7 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, /** * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves * @bond_dev: bonding net device that got called + * @proto: network protocol ID * @vid: vlan id being removed */ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, @@ -948,7 +950,7 @@ static bool bond_should_notify_peers(struct bonding *bond) /** * change_active_interface - change the active slave into the specified one * @bond: our bonding struct - * @new: the new slave to make the active one + * @new_active: the new slave to make the active one * * Set the new slave to the bond's settings and unset them on the old * curr_active_slave. 
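Most of the kernel-doc hunks in bond_3ad.c, bond_alb.c, and bond_main.c above repair the same class of warning: each @name tag must match the function's real parameter name, every parameter needs a tag, and the separator is a colon rather than a dash. A minimal well-formed block for a hypothetical helper, following the convention these hunks restore:

	/**
	 * bond_example_helper - one-line summary of the helper
	 * @bond: bonding struct to work on
	 * @link: whether the link is now up or down
	 *
	 * Free-form description. The names after each @ must match the C
	 * signature exactly; a stale name from an old signature or the
	 * "@bond -" spelling both make scripts/kernel-doc warn.
	 *
	 * Return: 0 on success, a negative errno value otherwise.
	 */
	static int bond_example_helper(struct bonding *bond, char link);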
@@ -2205,7 +2207,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev, int ret; ret = __bond_release_one(bond_dev, slave_dev, false, true); - if (ret == 0 && !bond_has_slaves(bond)) { + if (ret == 0 && !bond_has_slaves(bond) && + bond_dev->reg_state != NETREG_UNREGISTERING) { bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; netdev_info(bond_dev, "Destroying bond\n"); bond_remove_proc_entry(bond); @@ -2271,7 +2274,7 @@ static int bond_miimon_inspect(struct bonding *bond) "active " : "backup ") : "", bond->params.downdelay * bond->params.miimon); } - /*FALLTHRU*/ + fallthrough; case BOND_LINK_FAIL: if (link_state) { /* recovered before downdelay expired */ @@ -2307,7 +2310,7 @@ static int bond_miimon_inspect(struct bonding *bond) bond->params.updelay * bond->params.miimon); } - /*FALLTHRU*/ + fallthrough; case BOND_LINK_BACK: if (!link_state) { bond_propose_link_state(slave, BOND_LINK_DOWN); @@ -2945,6 +2948,9 @@ static int bond_ab_arp_inspect(struct bonding *bond) if (bond_time_in_interval(bond, last_rx, 1)) { bond_propose_link_state(slave, BOND_LINK_UP); commit++; + } else if (slave->link == BOND_LINK_BACK) { + bond_propose_link_state(slave, BOND_LINK_FAIL); + commit++; } continue; } @@ -3053,6 +3059,19 @@ static void bond_ab_arp_commit(struct bonding *bond) continue; + case BOND_LINK_FAIL: + bond_set_slave_link_state(slave, BOND_LINK_FAIL, + BOND_SLAVE_NOTIFY_NOW); + bond_set_slave_inactive_flags(slave, + BOND_SLAVE_NOTIFY_NOW); + + /* A slave has just been enslaved and has become + * the current active slave. + */ + if (rtnl_dereference(bond->curr_active_slave)) + RCU_INIT_POINTER(bond->current_arp_slave, NULL); + continue; + default: slave_err(bond->dev, slave->dev, "impossible: link_new_state %d on slave\n", @@ -3103,8 +3122,6 @@ static bool bond_ab_arp_probe(struct bonding *bond) return should_notify_rtnl; } - bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER); - bond_for_each_slave_rcu(bond, slave, iter) { if (!found && !before && bond_slave_is_up(slave)) before = slave; @@ -3305,7 +3322,7 @@ static int bond_slave_netdev_event(unsigned long event, if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_adapter_speed_duplex_changed(slave); - /* Fallthrough */ + fallthrough; case NETDEV_DOWN: /* Refresh slave-array if applicable! * If the setup does not use miimon or arpmon (mode-specific!), @@ -3743,7 +3760,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd return -EINVAL; mii->phy_id = 0; - /* Fall Through */ + fallthrough; case SIOCGMIIREG: /* We do this again just in case we were called by SIOCGMIIREG * instead of SIOCGMIIPHY. 
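The next hunk changes how the bond's aggregate speed is reported: in broadcast mode every packet is transmitted on every slave, so the usable speed is bounded by the slowest transmitting slave rather than the sum over all slaves. A short usage sketch of the helper it introduces, with hypothetical slaves at 1000 and 100 Mb/s:

	u32 speed = 0;

	/* broadcast mode: the first call seeds the value, later calls take the min */
	speed = bond_mode_bcast_speed(slave_a, speed);	/* slave_a->speed == 1000 */
	speed = bond_mode_bcast_speed(slave_b, speed);	/* slave_b->speed == 100 */
	/* speed == 100; the other modes would instead sum to 1100 */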
@@ -4552,13 +4569,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } +static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) +{ + if (speed == 0 || speed == SPEED_UNKNOWN) + speed = slave->speed; + else + speed = min(speed, slave->speed); + + return speed; +} + static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, struct ethtool_link_ksettings *cmd) { struct bonding *bond = netdev_priv(bond_dev); - unsigned long speed = 0; struct list_head *iter; struct slave *slave; + u32 speed = 0; cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.port = PORT_OTHER; @@ -4570,8 +4597,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, */ bond_for_each_slave(bond, slave, iter) { if (bond_slave_can_tx(slave)) { - if (slave->speed != SPEED_UNKNOWN) - speed += slave->speed; + if (slave->speed != SPEED_UNKNOWN) { + if (BOND_MODE(bond) == BOND_MODE_BROADCAST) + speed = bond_mode_bcast_speed(slave, + speed); + else + speed += slave->speed; + } if (cmd->base.duplex == DUPLEX_UNKNOWN && slave->duplex != DUPLEX_UNKNOWN) cmd->base.duplex = slave->duplex; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 9df2007b5e56b50e7a5f19368533239c80c78253..38e9f80ed1ef688759c961b7d24ef02dbc61512d 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -898,7 +898,7 @@ static void at91_irq_err_state(struct net_device *dev, CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; } - /* fall through */ + fallthrough; case CAN_STATE_ERROR_WARNING: /* * from: ERROR_ACTIVE, ERROR_WARNING @@ -948,7 +948,7 @@ static void at91_irq_err_state(struct net_device *dev, netdev_dbg(dev, "Error Active\n"); cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; - /* fall through */ + fallthrough; case CAN_STATE_ERROR_WARNING: reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF; reg_ier = AT91_IRQ_ERRP; diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index 6ad83a88103950c5eafa6ecab4983ca17673e3f3..9469d4421afe5dfccadcda9f0227cd583e8d06e3 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -659,7 +659,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd) pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ, PCIEFD_REG_CAN_CLK_SEL); - /* fall through */ + fallthrough; case CANFD_CLK_SEL_80MHZ: priv->ucan.can.clock.freq = 80 * 1000 * 1000; break; diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index d7222ba466225cd423c9974fd65c76dac965ad03..d7c2ec529b8f131f34f7e9bd5c93dc8e3dc8929a 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -150,7 +150,7 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of) priv->read_reg = sp_read_reg16; priv->write_reg = sp_write_reg16; break; - case 1: /* fallthrough */ + case 1: default: priv->read_reg = sp_read_reg8; priv->write_reg = sp_write_reg8; diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 91cdc0a2b1a740e81aba847551528a7afaed0fef..b4a39f0449ba4be95af334d4c30dcbd84465e7d4 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -153,7 +153,7 @@ static void slc_bump(struct slcan *sl) switch (*cmd) { case 'r': cf.can_id = CAN_RTR_FLAG; - /* fallthrough */ + fallthrough; case 't': /* store dlc ASCII value and terminate SFF CAN ID string */ cf.can_dlc = sl->rbuff[SLC_CMD_LEN + 
SLC_SFF_ID_LEN]; @@ -163,7 +163,7 @@ static void slc_bump(struct slcan *sl) break; case 'R': cf.can_id = CAN_RTR_FLAG; - /* fallthrough */ + fallthrough; case 'T': cf.can_id |= CAN_EFF_FLAG; /* store dlc ASCII value and terminate EFF CAN ID string */ diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 5009ff29494113956d08d9f0692b2aeb5650c49e..d17608870f2d2f0989151a127236adeca76e7a52 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -864,7 +864,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) if (new_state >= CAN_STATE_ERROR_WARNING && new_state <= CAN_STATE_BUS_OFF) priv->can.can_stats.error_warning++; - /* fall through */ + fallthrough; case CAN_STATE_ERROR_WARNING: if (new_state >= CAN_STATE_ERROR_PASSIVE && new_state <= CAN_STATE_BUS_OFF) diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index d2539c95adb65cc24cc7bdb73a954d721071d1a1..66d0198e7834db52db71ed6369a4c8075f0cc40e 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -415,7 +415,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, new_state = CAN_STATE_ERROR_WARNING; break; } - /* fall through */ + fallthrough; case CAN_STATE_ERROR_WARNING: if (n & PCAN_USB_ERROR_BUS_HEAVY) { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 0b7766b715fd22a26adaecb6491e69bef7d2798b..d91df34e7fa8800f6931e7d4dade99c05a62dea4 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -345,7 +345,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, default: netdev_warn(netdev, "tx urb submitting failed err=%d\n", err); - /* fall through */ + fallthrough; case -ENOENT: /* cable unplugged */ stats->tx_dropped++; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 53cb2f72bdd0574b4fe6bcd654dd12cf57179546..1689ab387612062e013bec742ae19927e612b37d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -133,10 +133,10 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...) 
switch (id) { case PCAN_USBPRO_TXMSG8: i += 4; - /* fall through */ + fallthrough; case PCAN_USBPRO_TXMSG4: i += 4; - /* fall through */ + fallthrough; case PCAN_USBPRO_TXMSG0: *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 6500179c2ca22e7f960be1d537acf2559c64d78b..e731db900ee02a43fc15b06ed54553b4253dd8d2 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1061,7 +1061,7 @@ static void b53_force_port_config(struct b53_device *dev, int port, switch (speed) { case 2000: reg |= PORT_OVERRIDE_SPEED_2000M; - /* fallthrough */ + fallthrough; case SPEED_1000: reg |= PORT_OVERRIDE_SPEED_1000M; break; @@ -1554,6 +1554,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, return ret; switch (ret) { + case -ETIMEDOUT: + return ret; case -ENOSPC: dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", addr, vid); diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c index 629bf14128a283ef1deeb089396fb90fff5bb17a..5ae3d9783b68cfb424eae1bced0b571181510d08 100644 --- a/drivers/net/dsa/b53/b53_serdes.c +++ b/drivers/net/dsa/b53/b53_serdes.c @@ -170,7 +170,7 @@ void b53_serdes_phylink_validate(struct b53_device *dev, int port, switch (lane) { case 0: phylink_set(supported, 2500baseX_Full); - /* fallthrough */ + fallthrough; case 1: phylink_set(supported, 1000baseX_Full); break; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index bafddb35f3a990b02ce0de5613c2b224cfa747c8..5ebff986a1ac74a52e13e592068fec907e55f411 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -566,7 +566,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, switch (state->interface) { case PHY_INTERFACE_MODE_RGMII: id_mode_dis = 1; - /* fallthrough */ + fallthrough; case PHY_INTERFACE_MODE_RGMII_TXID: port_mode = EXT_GPHY; break; diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index dc999406ce866b7d0750be346f60939625070158..3cb22d149813188475f0c54faa1ad45384bcbc20 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -1083,7 +1083,7 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port) interface = PHY_INTERFACE_MODE_GMII; if (gbit) break; - /* fall through */ + fallthrough; case 0: interface = PHY_INTERFACE_MODE_MII; break; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 8dcb8a49ab67f750b273fef012c9ce877050b9cd..1aaf47a0da2b35523a8af8b886c532de20d236db 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -566,7 +566,7 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface) case P5_INTF_SEL_PHY_P0: /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */ val |= MHWTRAP_PHY0_SEL; - /* fall through */ + fallthrough; case P5_INTF_SEL_PHY_P4: /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */ val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS; @@ -1326,14 +1326,17 @@ mt7530_setup(struct dsa_switch *ds) if (phy_node->parent == priv->dev->of_node->parent) { ret = of_get_phy_mode(mac_np, &interface); - if (ret && ret != -ENODEV) + if (ret && ret != -ENODEV) { + of_node_put(mac_np); return ret; + } id = of_mdio_parse_addr(ds->dev, phy_node); if (id == 0) priv->p5_intf_sel = P5_INTF_SEL_PHY_P0; if (id == 4) priv->p5_intf_sel = P5_INTF_SEL_PHY_P4; } + of_node_put(mac_np); of_node_put(phy_node); break; } @@ -1501,7 +1504,7 @@ static void 
mt7530_phylink_validate(struct dsa_switch *ds, int port, phylink_set(mask, 100baseT_Full); if (state->interface != PHY_INTERFACE_MODE_MII) { - phylink_set(mask, 1000baseT_Half); + /* This switch only supports 1G full-duplex. */ phylink_set(mask, 1000baseT_Full); if (port == 5) phylink_set(mask, 1000baseX_Full); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 7a71c9902e73e82437803fbf50be74c7766221dc..f0dbc05e30a4de50c32c868994c28dd62f1f4b2f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -875,7 +875,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, break; case STATS_TYPE_BANK1: reg = bank1_select; - /* fall through */ + fallthrough; case STATS_TYPE_BANK0: reg |= s->reg | histogram; mv88e6xxx_g1_stats_read(chip, reg, &low); diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig index f121619d81fef486798cae1176b5a268f3da9b48..2d23ccef7d0edb32467a1df44de27803ce2b9d58 100644 --- a/drivers/net/dsa/ocelot/Kconfig +++ b/drivers/net/dsa/ocelot/Kconfig @@ -9,7 +9,7 @@ config NET_DSA_MSCC_FELIX select NET_DSA_TAG_OCELOT select FSL_ENETC_MDIO help - This driver supports network switches from the the Vitesse / + This driver supports network switches from the Vitesse / Microsemi / Microchip Ocelot family of switching cores that are connected to their host CPU via Ethernet. The following switches are supported: diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index c69d9592a2b79487186909dd96839c1918c6979d..04bfa6e465ffdb7767dc7beadca49473d21d1ddf 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -400,6 +400,7 @@ static int felix_parse_ports_node(struct felix *felix, if (err < 0) { dev_err(dev, "Unsupported PHY mode %s on port %d\n", phy_modes(phy_mode), port); + of_node_put(child); return err; } diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index c3f6f124e5f01ba9508e0dd733db1eae3c44af4e..5a28dfb36ec36acbcc2e0189f045f8a55dd39c38 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -3415,7 +3415,7 @@ static int sja1105_check_device_id(struct sja1105_private *priv) sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID); - for (match = sja1105_dt_ids; match->compatible; match++) { + for (match = sja1105_dt_ids; match->compatible[0]; match++) { const struct sja1105_info *info = match->data; /* Is what's been probed in our match table at all? 
*/ diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 139d0120f5114e416e2ef71effba41a50e66e668..667f38c9e4c63051131966e1349cf5774b37d594 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -1259,14 +1259,14 @@ el3_up(struct net_device *dev) pr_cont("Forcing 3c5x9b full-duplex mode"); break; } - /* fall through */ + fallthrough; case 8: /* set full-duplex mode based on eeprom config setting */ if ((sw_info & 0x000f) && (sw_info & 0x8000)) { pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)"); break; } - /* fall through */ + fallthrough; default: /* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */ pr_cont("Setting 3c5x9/3c5x9B half-duplex mode"); diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c index ef1c3151fbb248b4bd4ecf3a699c047a2c1bd42d..f66e7fb9a2bb9357884389c47cadbe43cf7b9e93 100644 --- a/drivers/net/ethernet/3com/3c574_cs.c +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -951,7 +951,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev) static void update_stats(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; - u8 rx, tx, up; + u8 up; pr_debug("%s: updating the statistics.\n", dev->name); @@ -972,8 +972,8 @@ static void update_stats(struct net_device *dev) dev->stats.tx_packets += (up&0x30) << 4; /* Rx packets */ inb(ioaddr + 7); /* Tx deferrals */ inb(ioaddr + 8); - rx = inw(ioaddr + 10); - tx = inw(ioaddr + 12); + /* rx */ inw(ioaddr + 10); + /* tx */ inw(ioaddr + 12); EL3WINDOW(4); /* BadSSD */ inb(ioaddr + 12); @@ -1046,7 +1046,7 @@ static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get the address of the PHY in use. */ data->phy_id = phy; - /* fall through */ + fallthrough; case SIOCGMIIREG: /* Read the specified MII register. */ { int saved_window; diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index aeae7966a082f62cfeef2c76c4cffb8a30e73adc..a00b36f91d9f21b7b77b3663d6bf3d1e83b7aae7 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -610,7 +610,7 @@ static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCGMIIPHY: data->phy_id = info->phy_id; - /* Fall through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); return 0; @@ -898,6 +898,7 @@ static int ax_close(struct net_device *dev) /** * axnet_tx_timeout - handle transmit time out condition * @dev: network device which has apparently fallen asleep + * @txqueue: unused * * Called by kernel when device never acknowledges a transmit has * completed (or failed) - i.e. never posted a Tx related interrupt. diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 645efac6310dde9c9eab6bd128861beed0acece6..164c3ed550bf1bbaa6bb075db68b7dd14e6f747a 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -1108,7 +1108,7 @@ static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCGMIIPHY: data->phy_id = info->phy_id; - /* fall through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. 
*/ data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); return 0; diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 6234fcd844eead5f8e11f0b9010ce1b3418fd043..696517eae77f0aeae178181c52d4b39021bc16f8 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -1712,13 +1712,13 @@ static bool slic_is_fiber(unsigned short subdev) { switch (subdev) { /* Mojave */ - case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F: /* fallthrough */ - case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F: /* fallthrough */ + case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F: + case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F: fallthrough; /* Oasis */ - case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF: /* fallthrough */ - case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF: /* fallthrough */ - case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF: /* fallthrough */ - case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF: /* fallthrough */ + case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF: + case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF: + case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF: + case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF: return true; } return false; diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index ac86fcae1582d6eac2b1a64ec35fe9fb5c20beeb..8470c836fa188a18446f0d00566cf93791f66394 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -547,7 +547,7 @@ static int acenic_probe_one(struct pci_dev *pdev, ap->name); break; } - /* Fall through */ + fallthrough; case PCI_VENDOR_ID_SGI: printk(KERN_INFO "%s: SGI AceNIC ", ap->name); break; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 2a6c9725e0922566d69dad2dffbf304f928ce576..a3a8edf9a734d02474080e4ae686d93c45a44239 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2180,13 +2180,10 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter, int i; for (i = first_index; i < first_index + count; i++) { - /* Check if napi was initialized before */ - if (!ENA_IS_XDP_INDEX(adapter, i) || - adapter->ena_napi[i].xdp_ring) - netif_napi_del(&adapter->ena_napi[i].napi); - else - WARN_ON(ENA_IS_XDP_INDEX(adapter, i) && - adapter->ena_napi[i].xdp_ring); + netif_napi_del(&adapter->ena_napi[i].napi); + + WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) && + adapter->ena_napi[i].xdp_ring); } } @@ -3601,16 +3598,14 @@ static void ena_fw_reset_device(struct work_struct *work) { struct ena_adapter *adapter = container_of(work, struct ena_adapter, reset_task); - struct pci_dev *pdev = adapter->pdev; - if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { - dev_err(&pdev->dev, - "device reset schedule while reset bit is off\n"); - return; - } rtnl_lock(); - ena_destroy_device(adapter, false); - ena_restore_device(adapter); + + if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + ena_destroy_device(adapter, false); + ena_restore_device(adapter); + } + rtnl_unlock(); } @@ -3692,7 +3687,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, } u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.missed_tx = missed_tx; + tx_ring->tx_stats.missed_tx += missed_tx; u64_stats_update_end(&tx_ring->syncp); return rc; @@ -4389,8 +4384,11 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) netdev->rx_cpu_rmap = NULL; } #endif /* CONFIG_RFS_ACCEL */ - 
del_timer_sync(&adapter->timer_service); + /* Make sure timer and reset routine won't be called after + * freeing device resources. + */ + del_timer_sync(&adapter->timer_service); cancel_work_sync(&adapter->reset_task); rtnl_lock(); /* lock released inside the below if-else block */ @@ -4558,6 +4556,9 @@ static void ena_keep_alive_wd(void *adapter_data, tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low; u64_stats_update_begin(&adapter->syncp); + /* These stats are accumulated by the device, so the counters indicate + * all drops since last reset. + */ adapter->dev_stats.rx_drops = rx_drops; adapter->dev_stats.tx_drops = tx_drops; u64_stats_update_end(&adapter->syncp); diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index b6c43b58ed3d4360dd76ca7dc187f96b41f2edb9..960d483e899729781fa4170104f4eb60ef5d1f2f 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1475,7 +1475,7 @@ static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = lp->ext_phy_addr; - /* fallthru */ + fallthrough; case SIOCGMIIREG: spin_lock_irq(&lp->lock); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 43294a148f8a765f65800ce1a8534bc0d6af5617..4ba75551cb1794fad6fbe429adad8478d049783b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1538,7 +1538,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, /* PTP v2, UDP, any kind of event packet */ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); - /* Fall through - to PTP v1, UDP, any kind of event packet */ + fallthrough; /* to PTP v1, UDP, any kind of event packet */ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); @@ -1549,7 +1549,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, /* PTP v2, UDP, Sync packet */ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); - /* Fall through - to PTP v1, UDP, Sync packet */ + fallthrough; /* to PTP v1, UDP, Sync packet */ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); @@ -1560,7 +1560,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, /* PTP v2, UDP, Delay_req packet */ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); - /* Fall through - to PTP v1, UDP, Delay_req packet */ + fallthrough; /* to PTP v1, UDP, Delay_req packet */ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 46c3c1ca38d60c06ea4c90ee26fb709971c71d0d..859ded0c06b050a896cf002ab6ba258e53bacb40 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -166,6 +166,7 @@ enum xgbe_port_mode { XGBE_PORT_MODE_10GBASE_T, XGBE_PORT_MODE_10GBASE_R, XGBE_PORT_MODE_SFP, + XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG, XGBE_PORT_MODE_MAX, }; @@ -1634,6 +1635,7 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) if (ad_reg & 0x80) { switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: + case 
XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG: mode = XGBE_MODE_KR; break; default: @@ -1643,6 +1645,7 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) } else if (ad_reg & 0x20) { switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: + case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG: mode = XGBE_MODE_KX_1000; break; case XGBE_PORT_MODE_1000BASE_X: @@ -1782,6 +1785,7 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: + case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG: XGBE_SET_ADV(dlks, 10000baseKR_Full); break; case XGBE_PORT_MODE_BACKPLANE_2500: @@ -1874,6 +1878,7 @@ static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata) switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: return XGBE_AN_MODE_CL73; + case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG: case XGBE_PORT_MODE_BACKPLANE_2500: return XGBE_AN_MODE_NONE; case XGBE_PORT_MODE_1000BASE_T: @@ -2156,6 +2161,7 @@ static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata) switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: + case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG: return xgbe_phy_switch_bp_mode(pdata); case XGBE_PORT_MODE_BACKPLANE_2500: return xgbe_phy_switch_bp_2500_mode(pdata); @@ -2251,6 +2257,7 @@ static enum xgbe_mode xgbe_phy_get_mode(struct xgbe_prv_data *pdata, switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: + case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG: return xgbe_phy_get_bp_mode(speed); case XGBE_PORT_MODE_BACKPLANE_2500: return xgbe_phy_get_bp_2500_mode(speed); @@ -2426,6 +2433,7 @@ static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: + case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG: return xgbe_phy_use_bp_mode(pdata, mode); case XGBE_PORT_MODE_BACKPLANE_2500: return xgbe_phy_use_bp_2500_mode(pdata, mode); @@ -2515,6 +2523,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: + case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG: return xgbe_phy_valid_speed_bp_mode(speed); case XGBE_PORT_MODE_BACKPLANE_2500: return xgbe_phy_valid_speed_bp_2500_mode(speed); @@ -2792,6 +2801,7 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata) switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: + case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG: if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)) return false; @@ -2844,6 +2854,7 @@ static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata) switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: + case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG: case XGBE_PORT_MODE_BACKPLANE_2500: if (phy_data->conn_type == XGBE_CONN_TYPE_BACKPLANE) return false; @@ -3160,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) /* Backplane support */ case XGBE_PORT_MODE_BACKPLANE: XGBE_SET_SUP(lks, Autoneg); + fallthrough; + case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG: XGBE_SET_SUP(lks, Pause); XGBE_SET_SUP(lks, Asym_Pause); XGBE_SET_SUP(lks, Backplane); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 16a944707ba908dd556e15cf92681c79bc0aa79a..8941ac4df9e375f8cd631c960946d44c90b591ff 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -1631,8 
+1631,8 @@ static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp) hw_atl_ts_reset_set(self, 0); } - err = readx_poll_timeout_atomic(hw_atl_b0_ts_ready_and_latch_high_get, - self, val, val == 1, 10000U, 500000U); + err = readx_poll_timeout(hw_atl_b0_ts_ready_and_latch_high_get, self, + val, val == 1, 10000U, 500000U); if (err) return err; diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 0187dbf3b87dfcdaa2dc1bf39f37a4d4bf47c7a6..54cdafdd067db3d19ac81dd7132e09a13fe86445 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -153,6 +153,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) if (IS_ERR(data->reset_gpio)) { error = PTR_ERR(data->reset_gpio); dev_err(priv->dev, "Failed to request gpio: %d\n", error); + mdiobus_free(bus); return error; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index dfed9ade6950ca8769f57e7470422822ebf63b7f..0762d5d1a810eb2a63d6fc88004ad94e11a4f4a3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2491,8 +2491,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->tx_rings = devm_kcalloc(&pdev->dev, txq, sizeof(struct bcm_sysport_tx_ring), GFP_KERNEL); - if (!priv->tx_rings) - return -ENOMEM; + if (!priv->tx_rings) { + ret = -ENOMEM; + goto err_free_netdev; + } priv->is_lite = params->is_lite; priv->num_rx_desc_words = params->num_rx_desc_words; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 34d18302b1a38e179be8615101f979768a1fd4fb..a5fd161ab5ee15f2b5a5aea77abdf49cac41e2d0 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -217,7 +217,7 @@ static int bgmac_probe(struct bcma_device *core) /* BCM 471X/535X family */ case BCMA_CHIP_ID_BCM4716: bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; - /* fallthrough */ + fallthrough; case BCMA_CHIP_ID_BCM47162: bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2; bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 6795b6d95f54d2c1854a396a38376bfbe10a3c22..f37f1c58f3689308082df39fc30ad41dbb4920d9 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -131,7 +131,7 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev) switch (bgmac->net_dev->phydev->speed) { default: netdev_err(net_dev, "Unsupported speed. 
Defaulting to 1000Mb\n"); - /* fall through */ + fallthrough; case SPEED_1000: val |= NICPM_IOMUX_CTRL_SPD_1000M << NICPM_IOMUX_CTRL_SPD_SHIFT; break; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index c8cc14eadbb4af8ae3816707741d0f4a497ab8ae..3e8a179f39db48c8d7f92ab34703636ac5b07d1d 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -1337,13 +1337,13 @@ bnx2_set_mac_link(struct bnx2 *bp) val |= BNX2_EMAC_MODE_PORT_MII_10M; break; } - /* fall through */ + fallthrough; case SPEED_100: val |= BNX2_EMAC_MODE_PORT_MII; break; case SPEED_2500: val |= BNX2_EMAC_MODE_25G_MODE; - /* fall through */ + fallthrough; case SPEED_1000: val |= BNX2_EMAC_MODE_PORT_GMII; break; @@ -1995,26 +1995,26 @@ bnx2_remote_phy_event(struct bnx2 *bp) switch (speed) { case BNX2_LINK_STATUS_10HALF: bp->duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case BNX2_LINK_STATUS_10FULL: bp->line_speed = SPEED_10; break; case BNX2_LINK_STATUS_100HALF: bp->duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case BNX2_LINK_STATUS_100BASE_T4: case BNX2_LINK_STATUS_100FULL: bp->line_speed = SPEED_100; break; case BNX2_LINK_STATUS_1000HALF: bp->duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case BNX2_LINK_STATUS_1000FULL: bp->line_speed = SPEED_1000; break; case BNX2_LINK_STATUS_2500HALF: bp->duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case BNX2_LINK_STATUS_2500FULL: bp->line_speed = SPEED_2500; break; @@ -7856,7 +7856,7 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = bp->phy_addr; - /* fallthru */ + fallthrough; case SIOCGMIIREG: { u32 mii_regval; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 1426c691c7c4a416f4f8227ccb0e2d5dbd039107..4e85e7dbc2be310aaecf87bc40af1f41d198946e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -4712,14 +4712,14 @@ static void bnx2x_sync_link(struct link_params *params, LINK_STATUS_SPEED_AND_DUPLEX_MASK) { case LINK_10THD: vars->duplex = DUPLEX_HALF; - /* Fall thru */ + fallthrough; case LINK_10TFD: vars->line_speed = SPEED_10; break; case LINK_100TXHD: vars->duplex = DUPLEX_HALF; - /* Fall thru */ + fallthrough; case LINK_100T4: case LINK_100TXFD: vars->line_speed = SPEED_100; @@ -4727,14 +4727,14 @@ static void bnx2x_sync_link(struct link_params *params, case LINK_1000THD: vars->duplex = DUPLEX_HALF; - /* Fall thru */ + fallthrough; case LINK_1000TFD: vars->line_speed = SPEED_1000; break; case LINK_2500THD: vars->duplex = DUPLEX_HALF; - /* Fall thru */ + fallthrough; case LINK_2500TFD: vars->line_speed = SPEED_2500; break; @@ -6339,7 +6339,7 @@ int bnx2x_set_led(struct link_params *params, */ if (!vars->link_up) break; - /* fall through */ + fallthrough; case LED_MODE_ON: if (((params->phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || @@ -12508,13 +12508,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params, switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { case PORT_FEATURE_LINK_SPEED_10M_HALF: phy->req_duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case PORT_FEATURE_LINK_SPEED_10M_FULL: phy->req_line_speed = SPEED_10; break; case PORT_FEATURE_LINK_SPEED_100M_HALF: phy->req_duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case PORT_FEATURE_LINK_SPEED_100M_FULL: phy->req_line_speed = SPEED_100; break; diff --git 
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7f24d2689fdd77efb6204aece5949a8a636d9bf0..3c543dd7a8f306398fd02383aeb9db7354d421c4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8600,11 +8600,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp) bp->num_queues, 1 + bp->num_cnic_queues); - /* fall through */ + fallthrough; case BNX2X_INT_MODE_MSI: bnx2x_enable_msi(bp); - /* fall through */ + fallthrough; case BNX2X_INT_MODE_INTX: bp->num_ethernet_queues = 1; bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 80d250a6d048e560a0210e0e493040cdc4613c94..e26f4da5a6d7e4bc4d0ac27eeaff1323ac5f24f6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: o->set_registry_size(o, 0); - /* fall through */ + fallthrough; /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: @@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: o->set_registry_size(o, 0); - /* fall through */ + fallthrough; /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index b4476f44e3867adf125a1f598b4c18ecfb64634e..9c2f51f230351f429958ba7de932eaf0f6d8ba9c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1809,7 +1809,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", vf->abs_vfid, qidx); bnx2x_vf_handle_rss_update_eqe(bp, vf); - /* fall through */ + fallthrough; case EVENT_RING_OPCODE_VF_FLR: /* Do nothing for now */ return 0; @@ -2207,7 +2207,7 @@ int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) rc = bnx2x_vf_close(bp, vf); if (rc) goto op_err; - /* Fall through - to release resources */ + fallthrough; /* to release resources */ case VF_ACQUIRED: DP(BNX2X_MSG_IOV, "about to free resources\n"); bnx2x_vf_free_resc(bp, vf); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 31fb5a28e1c4f59d944552be2d3f60b7a61c5bda..b167066af45040c523bb19cc74695c9dca8ae717 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1141,6 +1141,9 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) { + if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) + return; + if (BNXT_PF(bp)) queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); else @@ -1157,10 +1160,12 @@ static void bnxt_queue_sp_work(struct bnxt *bp) static void bnxt_cancel_sp_work(struct bnxt *bp) { - if (BNXT_PF(bp)) + if (BNXT_PF(bp)) { flush_workqueue(bnxt_pf_wq); - else + } else { cancel_work_sync(&bp->sp_task); + cancel_delayed_work_sync(&bp->fw_reset_task); + } } static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) @@ -1923,7 
+1928,7 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) break; case BNXT_FW_HEALTH_REG_TYPE_GRC: reg_off = fw_health->mapped_regs[reg_idx]; - /* fall through */ + fallthrough; case BNXT_FW_HEALTH_REG_TYPE_BAR0: val = readl(bp->bar0 + reg_off); break; @@ -1966,11 +1971,11 @@ static int bnxt_async_event_process(struct bnxt *bp, } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); } - /* fall through */ + fallthrough; case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); - /* fall through */ + fallthrough; case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); break; @@ -6102,6 +6107,21 @@ static int bnxt_get_func_stat_ctxs(struct bnxt *bp) return cp + ulp_stat; } +/* Check if a default RSS map needs to be setup. This function is only + * used on older firmware that does not require reserving RX rings. + */ +static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + /* The RSS map is valid for RX rings set to resv_rx_rings */ + if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { + hw_resc->resv_rx_rings = bp->rx_nr_rings; + if (!netif_is_rxfh_configured(bp->dev)) + bnxt_set_dflt_rss_indir_tbl(bp); + } +} + static bool bnxt_need_reserve_rings(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; @@ -6110,22 +6130,28 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) int rx = bp->rx_nr_rings, stat; int vnic = 1, grp = rx; - if (bp->hwrm_spec_code < 0x10601) - return false; - - if (hw_resc->resv_tx_rings != bp->tx_nr_rings) + if (hw_resc->resv_tx_rings != bp->tx_nr_rings && + bp->hwrm_spec_code >= 0x10601) return true; + /* Old firmware does not need RX ring reservations but we still + * need to setup a default RSS map when needed. With new firmware + * we go through RX ring reservations first and then set up the + * RSS map for the successfully reserved RX rings when needed. 
+ */ + if (!BNXT_NEW_RM(bp)) { + bnxt_check_rss_tbl_no_rmgr(bp); + return false; + } if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) vnic = rx + 1; if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; stat = bnxt_get_func_stat_ctxs(bp); - if (BNXT_NEW_RM(bp) && - (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || - hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || - (hw_resc->resv_hw_ring_grps != grp && - !(bp->flags & BNXT_FLAG_CHIP_P5)))) + if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || + hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || + (hw_resc->resv_hw_ring_grps != grp && + !(bp->flags & BNXT_FLAG_CHIP_P5))) return true; if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) && hw_resc->resv_irqs != nq) @@ -6214,6 +6240,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp) if (!tx || !rx || !cp || !grp || !vnic || !stat) return -ENOMEM; + if (!netif_is_rxfh_configured(bp->dev)) + bnxt_set_dflt_rss_indir_tbl(bp); + return rc; } @@ -8495,9 +8524,6 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) rc = bnxt_init_int_mode(bp); bnxt_ulp_irq_restart(bp, rc); } - if (!netif_is_rxfh_configured(bp->dev)) - bnxt_set_dflt_rss_indir_tbl(bp); - if (rc) { netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); return rc; @@ -9284,16 +9310,19 @@ static ssize_t bnxt_show_temp(struct device *dev, struct hwrm_temp_monitor_query_input req = {0}; struct hwrm_temp_monitor_query_output *resp; struct bnxt *bp = dev_get_drvdata(dev); - u32 temp = 0; + u32 len = 0; resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); mutex_lock(&bp->hwrm_cmd_lock); - if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) - temp = resp->temp * 1000; /* display millidegree */ + if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) + len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ mutex_unlock(&bp->hwrm_cmd_lock); - return sprintf(buf, "%u\n", temp); + if (len) + return len; + + return sprintf(buf, "unknown\n"); } static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); @@ -9475,15 +9504,15 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } } - bnxt_enable_napi(bp); - bnxt_debug_dev_init(bp); - rc = bnxt_init_nic(bp, irq_re_init); if (rc) { netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); - goto open_err; + goto open_err_irq; } + bnxt_enable_napi(bp); + bnxt_debug_dev_init(bp); + if (link_re_init) { mutex_lock(&bp->link_lock); rc = bnxt_update_phy_setting(bp); @@ -9514,10 +9543,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_vf_reps_open(bp); return 0; -open_err: - bnxt_debug_dev_exit(bp); - bnxt_disable_napi(bp); - open_err_irq: bnxt_del_napi(bp); @@ -9765,7 +9790,7 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: mdio->phy_id = bp->link_info.phy_addr; - /* fallthru */ + fallthrough; case SIOCGMIIREG: { u16 mii_regval = 0; @@ -11022,7 +11047,7 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) writel(reg_off & BNXT_GRC_BASE_MASK, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; - /* fall through */ + fallthrough; case BNXT_FW_HEALTH_REG_TYPE_BAR0: writel(val, bp->bar0 + reg_off); break; @@ -11135,7 +11160,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) } bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; } - 
/* fall through */ + fallthrough; case BNXT_FW_RESET_STATE_RESET_FW: bnxt_reset_all(bp); bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; @@ -11158,7 +11183,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) } pci_set_master(bp->pdev); bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; - /* fall through */ + fallthrough; case BNXT_FW_RESET_STATE_POLL_FW: bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; rc = __bnxt_hwrm_ver_get(bp, true); @@ -11173,7 +11198,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) } bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; - /* fall through */ + fallthrough; case BNXT_FW_RESET_STATE_OPENING: while (!rtnl_trylock()) { bnxt_queue_fw_reset_work(bp, HZ / 10); @@ -11761,6 +11786,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) unregister_netdev(dev); bnxt_dl_unregister(bp); bnxt_shutdown_tc(bp); + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); bnxt_cancel_sp_work(bp); bp->sp_event = 0; @@ -12200,6 +12226,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (BNXT_CHIP_P5(bp)) bp->flags |= BNXT_FLAG_CHIP_P5; + rc = bnxt_alloc_rss_indir_tbl(bp); + if (rc) + goto init_err_pci_clean; + rc = bnxt_fw_init_one_p2(bp); if (rc) goto init_err_pci_clean; @@ -12304,11 +12334,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ bp->tx_nr_rings_per_tc = bp->tx_nr_rings; - rc = bnxt_alloc_rss_indir_tbl(bp); - if (rc) - goto init_err_pci_clean; - bnxt_set_dflt_rss_indir_tbl(bp); - if (BNXT_PF(bp)) { if (!bnxt_pf_wq) { bnxt_pf_wq = @@ -12339,6 +12364,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) (long)pci_resource_start(pdev, 0), dev->dev_addr); pcie_print_link_status(pdev); + pci_save_state(pdev); return 0; init_err_cleanup: @@ -12536,6 +12562,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) "Cannot re-enable PCI device after reset.\n"); } else { pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); err = bnxt_hwrm_func_reset(bp); if (!err) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 64da654f10389b3d70e30417cf9a18fb97e2ed6b..d0928334bdc85906289f940ee6042db46d719e71 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -472,20 +472,13 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) static int bnxt_get_num_ring_stats(struct bnxt *bp) { int rx, tx, cmn; - bool sh = false; - - if (bp->flags & BNXT_FLAG_SHARED_RINGS) - sh = true; rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS + bnxt_get_num_tpa_ring_stats(bp); tx = NUM_RING_TX_HW_STATS; cmn = NUM_RING_CMN_SW_STATS; - if (sh) - return (rx + tx + cmn) * bp->cp_nr_rings; - else - return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings + - cmn * bp->cp_nr_rings; + return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings + + cmn * bp->cp_nr_rings; } static int bnxt_get_num_stats(struct bnxt *bp) @@ -806,7 +799,7 @@ static void bnxt_get_channels(struct net_device *dev, int max_tx_sch_inputs; /* Get the most up-to-date max_tx_sch_inputs. 
*/ - if (BNXT_NEW_RM(bp)) + if (netif_running(dev) && BNXT_NEW_RM(bp)) bnxt_hwrm_func_resc_qcaps(bp, false); max_tx_sch_inputs = hw_resc->max_tx_sch_inputs; @@ -1073,7 +1066,7 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4) cmd->data |= RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ + fallthrough; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -1092,7 +1085,7 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6) cmd->data |= RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ + fallthrough; case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: @@ -2323,6 +2316,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) if (rc != 0) return rc; + if (!dir_entries || !entry_length) + return -EIO; + /* Insert 2 bytes of directory info (count and size of entries) */ if (len < 2) return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 2704a4709bc7b4ca3db612e8082d3047984d119e..fcc262064766a2d432c9006c7206c2e3fca438cf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -201,10 +201,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, break; default: bpf_warn_invalid_xdp_action(act); - /* Fall thru */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(bp->dev, xdp_prog, act); - /* Fall thru */ + fallthrough; case XDP_DROP: bnxt_reuse_rx_data(rxr, cons, page); break; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index c5cca63b8571070715d431470323656875548694..84536292b031319cd9fd88896db6360a5440f166 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -3311,7 +3311,7 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info) } case CNIC_CTL_FCOE_STATS_GET_CMD: ulp_type = CNIC_ULP_FCOE; - /* fall through */ + fallthrough; case CNIC_CTL_ISCSI_STATS_GET_CMD: cnic_hold(dev); cnic_copy_ulp_stats(dev, ulp_type); @@ -4044,7 +4044,7 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) l4kcqe->status, l5kcqe->completion_status); opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; } - /* Fall through */ + fallthrough; case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: case L4_KCQE_OPCODE_VALUE_RESET_COMP: diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 1fecc25767bd01651c5050b37eb433b00247ec9d..be85dad2e3bc4d0667b7ecd2071a9bfd369634ca 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1185,10 +1185,10 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) continue; case BCMGENET_STAT_RUNT: offset += BCMGENET_STAT_OFFSET; - /* fall through */ + fallthrough; case BCMGENET_STAT_MIB_TX: offset += BCMGENET_STAT_OFFSET; - /* fall through */ + fallthrough; case BCMGENET_STAT_MIB_RX: val = bcmgenet_umac_readl(priv, UMAC_MIB_START + j + offset); @@ -1364,7 +1364,7 @@ static int bcmgenet_validate_flow(struct net_device *dev, case ETHER_FLOW: eth_mask = &cmd->fs.m_u.ether_spec; /* don't allow mask which isn't valid */ - if (VALIDATE_MASK(eth_mask->h_source) || + if (VALIDATE_MASK(eth_mask->h_dest) || VALIDATE_MASK(eth_mask->h_source) || 
VALIDATE_MASK(eth_mask->h_proto)) { netdev_err(dev, "rxnfc: Unsupported mask\n"); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 511d553a4d113126fa5016bad26013fd38f3e41a..6fb6c3556285485a24eca1f6b33228365a50139c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -192,7 +192,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) switch (priv->phy_interface) { case PHY_INTERFACE_MODE_INTERNAL: phy_name = "internal PHY"; - /* fall through */ + fallthrough; case PHY_INTERFACE_MODE_MOCA: /* Irrespective of the actually configured PHY speed (100 or * 1000) GENETv4 only has an internal GPHY so we will just end diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ebff1fc0d8cef5291c7dd05e5654aff849c8adde..5143cdd0eecada87c5827c2de74913a6cc15ce5e 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -715,7 +715,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return 0; - /* fall through */ + fallthrough; case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@ -776,7 +776,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return; - /* fall through */ + fallthrough; case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@ -1586,7 +1586,7 @@ static int tg3_mdio_init(struct tg3 *tp) phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; - /* fall through */ + fallthrough; case PHY_ID_RTL8211C: phydev->interface = PHY_INTERFACE_MODE_RGMII; break; @@ -2114,7 +2114,7 @@ static int tg3_phy_init(struct tg3 *tp) phy_support_asym_pause(phydev); break; } - /* fall through */ + fallthrough; case PHY_INTERFACE_MODE_MII: phy_set_max_speed(phydev, SPEED_100); phy_support_asym_pause(phydev); @@ -4390,7 +4390,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) MII_TG3_DSP_TAP26_RMRXSTO | MII_TG3_DSP_TAP26_OPCSINPT; tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); - /* Fall through */ + fallthrough; case ASIC_REV_5720: case ASIC_REV_5762: if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) @@ -4538,7 +4538,7 @@ static int tg3_phy_pull_config(struct tg3 *tp) tp->link_config.speed = SPEED_1000; break; } - /* Fall through */ + fallthrough; default: goto done; } @@ -5209,7 +5209,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) ap->state = ANEG_STATE_AN_ENABLE; - /* fall through */ + fallthrough; case ANEG_STATE_AN_ENABLE: ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); if (ap->flags & MR_AN_ENABLE) { @@ -5239,7 +5239,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, ret = ANEG_TIMER_ENAB; ap->state = ANEG_STATE_RESTART; - /* fall through */ + fallthrough; case ANEG_STATE_RESTART: delta = ap->cur_time - ap->link_time; if (delta > ANEG_STATE_SETTLE_TIME) @@ -5282,7 +5282,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, ap->state = ANEG_STATE_ACK_DETECT; - /* fall through */ + fallthrough; case ANEG_STATE_ACK_DETECT: if (ap->ack_match != 0) { if ((ap->rxconfig & ~ANEG_CFG_ACK) == @@ -7221,8 +7221,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp) static inline void tg3_reset_task_cancel(struct tg3 *tp) { - cancel_work_sync(&tp->reset_task); - tg3_flag_clear(tp, 
RESET_TASK_PENDING); + if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) + cancel_work_sync(&tp->reset_task); tg3_flag_clear(tp, TX_RECOVERY_PENDING); } @@ -10720,40 +10720,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) switch (limit) { case 16: tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); - /* fall through */ + fallthrough; case 15: tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); - /* fall through */ + fallthrough; case 14: tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); - /* fall through */ + fallthrough; case 13: tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); - /* fall through */ + fallthrough; case 12: tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); - /* fall through */ + fallthrough; case 11: tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); - /* fall through */ + fallthrough; case 10: tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); - /* fall through */ + fallthrough; case 9: tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); - /* fall through */ + fallthrough; case 8: tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); - /* fall through */ + fallthrough; case 7: tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); - /* fall through */ + fallthrough; case 6: tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); - /* fall through */ + fallthrough; case 5: tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); - /* fall through */ + fallthrough; case 4: /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ case 3: @@ -11209,18 +11209,27 @@ static void tg3_reset_task(struct work_struct *work) tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); err = tg3_init_hw(tp, true); - if (err) + if (err) { + tg3_full_unlock(tp); + tp->irq_sync = 0; + tg3_napi_enable(tp); + /* Clear this flag so that tg3_reset_task_cancel() will not + * call cancel_work_sync() and wait forever. 
+ */ + tg3_flag_clear(tp, RESET_TASK_PENDING); + dev_close(tp->dev); goto out; + } tg3_netif_start(tp); -out: tg3_full_unlock(tp); if (!err) tg3_phy_start(tp); tg3_flag_clear(tp, RESET_TASK_PENDING); +out: rtnl_unlock(); } @@ -13998,7 +14007,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = tp->phy_addr; - /* fall through */ + fallthrough; case SIOCGMIIREG: { u32 mii_regval; @@ -17136,7 +17145,7 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; break; } - /* fallthrough */ + fallthrough; case 128: default: val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; @@ -17151,28 +17160,28 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) DMA_RWCTRL_WRITE_BNDRY_16); break; } - /* fallthrough */ + fallthrough; case 32: if (goal == BOUNDARY_SINGLE_CACHELINE) { val |= (DMA_RWCTRL_READ_BNDRY_32 | DMA_RWCTRL_WRITE_BNDRY_32); break; } - /* fallthrough */ + fallthrough; case 64: if (goal == BOUNDARY_SINGLE_CACHELINE) { val |= (DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64); break; } - /* fallthrough */ + fallthrough; case 128: if (goal == BOUNDARY_SINGLE_CACHELINE) { val |= (DMA_RWCTRL_READ_BNDRY_128 | DMA_RWCTRL_WRITE_BNDRY_128); break; } - /* fallthrough */ + fallthrough; case 256: val |= (DMA_RWCTRL_READ_BNDRY_256 | DMA_RWCTRL_WRITE_BNDRY_256); diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 49358d42a0e26760ffaf6337956bb65b6273d595..b9dd06b129454480ce49ad95f8275a508e73a80f 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -321,7 +321,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) case IOC_E_PFFAILED: case IOC_E_HWERROR: del_timer(&ioc->ioc_timer); - /* fall through */ + fallthrough; case IOC_E_TIMEOUT: ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); @@ -780,7 +780,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event) case IOCPF_E_INITFAIL: del_timer(&ioc->iocpf_timer); - /* fall through */ + fallthrough; case IOCPF_E_TIMEOUT: bfa_nw_ioc_hw_sem_release(ioc); @@ -849,7 +849,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) case IOCPF_E_FAIL: del_timer(&ioc->iocpf_timer); - /* fall through*/ + fallthrough; case IOCPF_E_TIMEOUT: bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 40107a9bd120f94c5eb99ce1e2db20cf36a233a2..a2c983f56b00eb1a88bed5b420a2a74bf7611d9d 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -1084,7 +1084,7 @@ bna_enet_sm_cfg_wait(struct bna_enet *enet, case ENET_E_CHLD_STOPPED: bna_enet_rx_start(enet); - /* Fall through */ + fallthrough; case ENET_E_FWRESP_PAUSE: if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) { enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index b5ecbfe13ab09edf812c966ed6d5d89d517f8072..2623a0da468206d21e1081a64e00b4412bec8ff2 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -1636,7 +1636,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx) &q1->qpt); cfg_req->q_cfg[i].qs.rx_buffer_size = htons((u16)q1->buffer_size); - /* Fall through */ + fallthrough; case BNA_RXP_SINGLE: /* Large/Single RxQ */ diff --git 
a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 31ebf3ee7ec0e117c7afa210564f05da1de3acdb..283918aeb741d59f8260a34c49360932c14f6ac3 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -460,7 +460,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd) case HWTSTAMP_TX_ONESTEP_SYNC: if (gem_ptp_set_one_step_sync(bp, 1) != 0) return -ERANGE; - /* fall through */ + fallthrough; case HWTSTAMP_TX_ON: tx_bd_control = TSTAMP_ALL_FRAMES; break; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index e73bc211779a0967bb130cc85c9a191cfa575d44..8e0ed01e7f038bf174b404b968a1a919460a728e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -977,15 +977,14 @@ static void octeon_destroy_resources(struct octeon_device *oct) schedule_timeout_uninterruptible(HZ / 10); - /* fallthrough */ + fallthrough; case OCT_DEV_HOST_OK: - /* fallthrough */ case OCT_DEV_CONSOLE_INIT_DONE: /* Remove any consoles */ octeon_remove_consoles(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_IO_QUEUES_DONE: if (lio_wait_for_instr_fetch(oct)) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); @@ -1027,7 +1026,7 @@ static void octeon_destroy_resources(struct octeon_device *oct) octeon_free_sc_done_list(oct); octeon_free_sc_zombie_list(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); @@ -1062,17 +1061,17 @@ static void octeon_destroy_resources(struct octeon_device *oct) kfree(oct->irq_name_storage); oct->irq_name_storage = NULL; - /* fallthrough */ + fallthrough; case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: if (OCTEON_CN23XX_PF(oct)) octeon_free_ioq_vector(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_MBOX_SETUP_DONE: if (OCTEON_CN23XX_PF(oct)) oct->fn_list.free_mbox(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_IN_RESET: case OCT_DEV_DROQ_INIT_DONE: /* Wait for any pending operations */ @@ -1095,11 +1094,11 @@ static void octeon_destroy_resources(struct octeon_device *oct) } } - /* fallthrough */ + fallthrough; case OCT_DEV_RESP_LIST_INIT_DONE: octeon_delete_response_list(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_INSTR_QUEUE_INIT_DONE: for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) @@ -1110,16 +1109,16 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (oct->sriov_info.sriov_enabled) pci_disable_sriov(oct->pci_dev); #endif - /* fallthrough */ + fallthrough; case OCT_DEV_SC_BUFF_POOL_INIT_DONE: octeon_free_sc_buffer_pool(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_DISPATCH_INIT_DONE: octeon_delete_dispatch_list(oct); cancel_delayed_work_sync(&oct->nic_poll_work.work); - /* fallthrough */ + fallthrough; case OCT_DEV_PCI_MAP_DONE: refcount = octeon_deregister_device(oct); @@ -1137,13 +1136,13 @@ static void octeon_destroy_resources(struct octeon_device *oct) octeon_unmap_pci_barx(oct, 0); octeon_unmap_pci_barx(oct, 1); - /* fallthrough */ + fallthrough; case OCT_DEV_PCI_ENABLE_DONE: pci_clear_master(oct->pci_dev); /* Disable the device, releasing the PCI INT */ pci_disable_device(oct->pci_dev); - /* fallthrough */ + fallthrough; case OCT_DEV_BEGIN_STATE: /* Nothing to be done here either */ break; @@ -2168,7 +2167,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 
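Separate from the fallthrough churn, two fixes earlier in this chunk, mt7530_setup() and felix_parse_ports_node(), close the same kind of reference leak: for_each_child_of_node() and related iterators take a reference on the node they hand you, and every early exit from the walk has to drop it with of_node_put(). A kernel-style sketch of the rule (validate_port() is an invented stand-in for the real per-port checks, not an existing API):

#include <linux/errno.h>
#include <linux/of.h>

static int validate_port(struct device_node *np)
{
	return of_device_is_available(np) ? 0 : -ENODEV;
}

static int scan_ports(struct device_node *ports)
{
	struct device_node *child;
	int err;

	for_each_child_of_node(ports, child) {
		err = validate_port(child);
		if (err) {
			/* the iterator's reference would otherwise leak */
			of_node_put(child);
			return err;
		}
	}
	/* normal termination: the iterator has already dropped the ref */
	return 0;
}

The mt7530 fix also needs the put on the success path, because it breaks out of the loop with mac_np still held; same reasoning, just on a non-error exit.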
case SIOCSHWTSTAMP: if (lio->oct_dev->ptp_enable) return hwtstamp_ioctl(netdev, ifr); - /* fall through */ + fallthrough; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 90ef21086f271befe92372a0cea2aa1f72dbc4bb..8c5879e31240fd1e2dff04fcaf2741d2edd46007 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -460,9 +460,8 @@ static void octeon_destroy_resources(struct octeon_device *oct) schedule_timeout_uninterruptible(HZ / 10); - /* fallthrough */ + fallthrough; case OCT_DEV_HOST_OK: - /* fallthrough */ case OCT_DEV_IO_QUEUES_DONE: if (lio_wait_for_instr_fetch(oct)) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); @@ -504,7 +503,7 @@ static void octeon_destroy_resources(struct octeon_device *oct) octeon_free_sc_done_list(oct); octeon_free_sc_zombie_list(oct); - /* fall through */ + fallthrough; case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); @@ -533,15 +532,15 @@ static void octeon_destroy_resources(struct octeon_device *oct) else cn23xx_vf_ask_pf_to_do_flr(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: octeon_free_ioq_vector(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_MBOX_SETUP_DONE: oct->fn_list.free_mbox(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_IN_RESET: case OCT_DEV_DROQ_INIT_DONE: mdelay(100); @@ -551,11 +550,11 @@ static void octeon_destroy_resources(struct octeon_device *oct) octeon_delete_droq(oct, i); } - /* fallthrough */ + fallthrough; case OCT_DEV_RESP_LIST_INIT_DONE: octeon_delete_response_list(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_INSTR_QUEUE_INIT_DONE: for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) @@ -563,27 +562,27 @@ static void octeon_destroy_resources(struct octeon_device *oct) octeon_delete_instr_queue(oct, i); } - /* fallthrough */ + fallthrough; case OCT_DEV_SC_BUFF_POOL_INIT_DONE: octeon_free_sc_buffer_pool(oct); - /* fallthrough */ + fallthrough; case OCT_DEV_DISPATCH_INIT_DONE: octeon_delete_dispatch_list(oct); cancel_delayed_work_sync(&oct->nic_poll_work.work); - /* fallthrough */ + fallthrough; case OCT_DEV_PCI_MAP_DONE: octeon_unmap_pci_barx(oct, 0); octeon_unmap_pci_barx(oct, 1); - /* fallthrough */ + fallthrough; case OCT_DEV_PCI_ENABLE_DONE: pci_clear_master(oct->pci_dev); /* Disable the device, releasing the PCI INT */ pci_disable_device(oct->pci_dev); - /* fallthrough */ + fallthrough; case OCT_DEV_BEGIN_STATE: /* Nothing to be done here either */ break; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 83dabcffc789a27512f55cf25a4039203a170f90..c7bdac79299ac5ae3697aa49fcde5ca96be5dc6c 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -522,7 +522,7 @@ static int nicvf_get_rss_hash_opts(struct nicvf *nic, case SCTP_V4_FLOW: case SCTP_V6_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case IPV4_FLOW: case IPV6_FLOW: info->data |= RXH_IP_SRC | RXH_IP_DST; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index c1378b5c780cf82fba99364d82118febc38fa229..063e560d9c1b3066a9b0f1cc4448ff84a049a1be 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ 
b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -594,10 +594,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, return true; default: bpf_warn_invalid_xdp_action(action); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(nic->netdev, prog, action); - /* fall through */ + fallthrough; case XDP_DROP: /* Check if it's a recycled page, if not * unmap the DMA mapping. diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 42c6e9379882f78f375236bf43db55432db722ec..387c357e1b8e2ab90eefa16c1b3d92ef48de4c78 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -2543,7 +2543,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) !(data->phy_id & 0xe0e0)) data->phy_id = mdio_phy_id_c45(data->phy_id >> 8, data->phy_id & 0x1f); - /* FALLTHRU */ + fallthrough; case SIOCGMIIPHY: return mdio_mii_ioctl(&pi->phy.mdio, data, cmd); case SIOCCHIOCTL: diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index b3e4118a15e700364292aba4c343f3dc67d9e849..9749d1239f58e121aed71e9360913612dd293669 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -136,7 +136,7 @@ int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, if (e->state == L2T_STATE_STALE) e->state = L2T_STATE_VALID; spin_unlock_bh(&e->lock); - /* fall through */ + fallthrough; case L2T_STATE_VALID: /* fast-path, send the packet on */ return cxgb3_ofld_send(dev, skb); case L2T_STATE_RESOLVING: diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c index e3510e9b21f3ceed337f92ede334d50a9efd7c7c..9a6d65243334af17beac881dd124b5a62ce9d9bd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c @@ -62,6 +62,7 @@ static struct thermal_zone_device_ops cxgb4_thermal_ops = { int cxgb4_thermal_init(struct adapter *adap) { struct ch_thermal *ch_thermal = &adap->ch_thermal; + char ch_tz_name[THERMAL_NAME_LENGTH]; int num_trip = CXGB4_NUM_TRIPS; u32 param, val; int ret; @@ -82,7 +83,8 @@ int cxgb4_thermal_init(struct adapter *adap) ch_thermal->trip_type = THERMAL_TRIP_CRITICAL; } - ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip, + snprintf(ch_tz_name, sizeof(ch_tz_name), "cxgb4_%s", adap->name); + ch_thermal->tzdev = thermal_zone_device_register(ch_tz_name, num_trip, 0, adap, &cxgb4_thermal_ops, NULL, 0, 0); @@ -105,7 +107,9 @@ int cxgb4_thermal_init(struct adapter *adap) int cxgb4_thermal_remove(struct adapter *adap) { - if (adap->ch_thermal.tzdev) + if (adap->ch_thermal.tzdev) { thermal_zone_device_unregister(adap->ch_thermal.tzdev); + adap->ch_thermal.tzdev = NULL; + } return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index c4864125fe026cd7fc6db8cea20d985e0fede19b..a10a6862a9a465e5abb8ac2120bf88bba5b3d0bd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -231,7 +231,7 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, if (e->state == L2T_STATE_STALE) e->state = L2T_STATE_VALID; spin_unlock_bh(&e->lock); - /* fall through */ + fallthrough; case L2T_STATE_VALID: /* fast-path, send the packet on */ return t4_ofld_send(adap, skb); case L2T_STATE_RESOLVING: diff --git 
a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index d2b587d1670aa66df330341993bc42fdf69cd6e9..869431a1eedd482776bdd3a298883dc24aabfc67 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2553,19 +2553,22 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev) pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR); - flits = DIV_ROUND_UP(pkt_len + sizeof(struct cpl_tx_pkt) + - sizeof(*wr), sizeof(__be64)); + flits = DIV_ROUND_UP(pkt_len + sizeof(*cpl) + sizeof(*wr), + sizeof(__be64)); ndesc = flits_to_desc(flits); lb = &pi->ethtool_lb; lb->loopback = 1; q = &adap->sge.ethtxq[pi->first_qset]; + __netif_tx_lock(q->txq, smp_processor_id()); reclaim_completed_tx(adap, &q->q, -1, true); credits = txq_avail(&q->q) - ndesc; - if (unlikely(credits < 0)) + if (unlikely(credits < 0)) { + __netif_tx_unlock(q->txq); return -ENOMEM; + } wr = (void *)&q->q.desc[q->q.pidx]; memset(wr, 0, sizeof(struct tx_desc)); @@ -2598,6 +2601,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev) init_completion(&lb->completion); txq_advance(&q->q, ndesc); cxgb4_ring_tx_db(adap, &q->q, ndesc); + __netif_tx_unlock(q->txq); /* wait for the pkt to return */ ret = wait_for_completion_timeout(&lb->completion, 10 * HZ); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 8a56491bb034145bf75a22bfd0aa2be6b54f98c6..fa3367966f4b5bddd9e88c51aa5c3655add23e10 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -7656,13 +7656,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, switch (nmac) { case 5: memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); - /* Fall through */ + fallthrough; case 4: memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); - /* Fall through */ + fallthrough; case 3: memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); - /* Fall through */ + fallthrough; case 2: memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index dbe8ee7e0e212f5ec9d129a0954055c2407dcceb..e2fe78e2e24241f64c8a2fbc4ac7b8fdf7414dff 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -517,7 +517,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp, } cpl = (void *)p; } - /* Fall through */ + fallthrough; case CPL_SGE_EGR_UPDATE: { /* diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 6bc7e7ba38c3df1959df831823f35a1d818c4c55..552d89fdf54a52119ff3dea0021e0933feca3ca6 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -272,7 +272,7 @@ static netdev_features_t enic_features_check(struct sk_buff *skb, case ntohs(ETH_P_IPV6): if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6)) goto out; - /* Fall through */ + fallthrough; case ntohs(ETH_P_IP): break; default: diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 66e67b24a887c572c5c94309c540df22fb7cd224..ffec0f3dd9578e1d0eb61a9f1bb6fc36522bfc1b 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2389,7 +2389,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) dev_info(dev, "probe %s ID %d\n", dev_name(dev), id); - netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM); + 
netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM); if (!netdev) { dev_err(dev, "Can't allocate ethernet device #%d\n", id); return -ENOMEM; @@ -2446,8 +2446,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->reset = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(port->reset)) { dev_err(dev, "no reset\n"); - clk_disable_unprepare(port->pclk); - return PTR_ERR(port->reset); + ret = PTR_ERR(port->reset); + goto unprepare; } reset_control_reset(port->reset); usleep_range(100, 500); @@ -2502,26 +2502,25 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) IRQF_SHARED, port_names[port->id], port); - if (ret) { - clk_disable_unprepare(port->pclk); - return ret; - } + if (ret) + goto unprepare; ret = register_netdev(netdev); - if (!ret) { + if (ret) + goto unprepare; + + netdev_info(netdev, + "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n", + port->irq, &dmares->start, + &gmacres->start); + ret = gmac_setup_phy(netdev); + if (ret) netdev_info(netdev, - "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n", - port->irq, &dmares->start, - &gmacres->start); - ret = gmac_setup_phy(netdev); - if (ret) - netdev_info(netdev, - "PHY init failed, deferring to ifup time\n"); - return 0; - } + "PHY init failed, deferring to ifup time\n"); + return 0; - port->netdev = NULL; - free_netdev(netdev); +unprepare: + clk_disable_unprepare(port->pclk); return ret; } @@ -2530,7 +2529,6 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev) struct gemini_ethernet_port *port = platform_get_drvdata(pdev); gemini_port_remove(port); - free_netdev(port->netdev); return 0; } diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 7f770513826207de3a9a15e39a06ac9b35307a9a..5c6c8c5ec7471a2bd8059ab571daad22f6194fb0 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -385,7 +385,7 @@ static void dm9000_set_io(struct board_info *db, int byte_width) case 3: dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n"); - /* fall through */ + fallthrough; case 2: db->dumpblk = dm9000_dumpblk_16bit; db->outblk = dm9000_outblk_16bit; diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 0ccd9994ad4574162f26d1cea72e4e5534780cc9..f9dd1aa9f2da399576e5e17775386abe75ed017d 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -3203,7 +3203,7 @@ srom_map_media(struct net_device *dev) case SROM_10BASETF: if (!lp->params.fdx) return -1; lp->fdx = true; - /* fall through */ + fallthrough; case SROM_10BASET: if (lp->params.fdx && !lp->fdx) return -1; @@ -3225,7 +3225,7 @@ srom_map_media(struct net_device *dev) case SROM_100BASETF: if (!lp->params.fdx) return -1; lp->fdx = true; - /* fall through */ + fallthrough; case SROM_100BASET: if (lp->params.fdx && !lp->fdx) return -1; @@ -3239,7 +3239,7 @@ srom_map_media(struct net_device *dev) case SROM_100BASEFF: if (!lp->params.fdx) return -1; lp->fdx = true; - /* fall through */ + fallthrough; case SROM_100BASEF: if (lp->params.fdx && !lp->fdx) return -1; diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 9db23527275a376ee7ea0b41309606d42592bedc..3a8659c5da06434eb7add42fa3df0379ff92ad1a 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -911,7 +911,7 @@ static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) 
data->phy_id = 1; else return -ENODEV; - /* Fall through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) { diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 5dcc66f601444a02d28a619077e460de7c75a273..5a43be327f588aac718506da87af7c34f29ee63f 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -1443,7 +1443,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f; - /* Fall Through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ spin_lock_irq(&np->lock); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index d6ed1d9437625bafd4002275cd27cbf032d44eed..99cc1c46fb301a21a542fc6b108ecf04b1c9f89f 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -571,7 +571,7 @@ static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds) break; } } - /* fall through */ + fallthrough; case PHY_TYPE_SFP_PLUS_10GB: case PHY_TYPE_XFP_10GB: case PHY_TYPE_SFP_1GB: diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 43570f4911ea1276275617b21572a2f1b08b4020..fdff3b4723badf787488a14c77cc394da6dac83d 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -945,7 +945,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv, break; case FQ_TYPE_TX_CONF_MQ: priv->conf_fqs[conf_cnt++] = &fq->fq_base; - /* fall through */ + fallthrough; case FQ_TYPE_TX_CONFIRM: dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq); break; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 9db2a02fb53178049f045f872343257d671c233e..1268996b703015a86fbcfa8404ffa9d587ac621c 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -375,7 +375,7 @@ static int dpaa_get_hash_opts(struct net_device *dev, case UDP_V6_FLOW: if (priv->keygen_in_use) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case IPV4_FLOW: case IPV6_FLOW: case SCTP_V4_FLOW: diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 457106e761be07151e689907a352b1d512b598cc..cf5383bb83319a5e4bd81c1985835d81acc4ee43 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -376,10 +376,10 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv, break; default: bpf_warn_invalid_xdp_action(xdp_act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); - /* fall through */ + fallthrough; case XDP_DROP: xdp_release_buf(priv, ch, addr); ch->stats.xdp_drop++; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9934421814b41b751447d5b36efd90c1727f50e3..fb37816a74db9c08a8ddc145641a3e83f9eea0e0 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3715,11 +3715,11 @@ fec_probe(struct platform_device *pdev) failed_irq: failed_init: 
fec_ptp_stop(pdev); - if (fep->reg_phy) - regulator_disable(fep->reg_phy); failed_reset: pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); failed_regulator: clk_disable_unprepare(fep->clk_ahb); failed_clk_ahb: diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 645764abdaae595b32c9aeee76c85657e0e23d69..bb9887f988411d86ff72c543bd7c1ebb66fe41ba 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -528,7 +528,7 @@ static void setup_sgmii_internal_phy(struct fman_mac *memac, case 100: tmp_reg16 |= IF_MODE_SGMII_SPEED_100M; break; - case 1000: /* fallthrough */ + case 1000: default: tmp_reg16 |= IF_MODE_SGMII_SPEED_1G; break; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index c27df153f8959a509d0c8c0e30c95b3ffd49c914..624b2eb6f01decb50dfa0da9b59700820d19a3d6 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1344,10 +1344,10 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params) switch (port->port_type) { case FMAN_PORT_TYPE_RX: set_rx_dflt_cfg(port, params); - /* fall through */ + fallthrough; case FMAN_PORT_TYPE_TX: set_tx_dflt_cfg(port, params, &port->dts_params); - /* fall through */ + fallthrough; default: set_dflt_cfg(port, params); } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index b513b8c5c3b5e5be7027e36acad20f6a15b9c991..41dd3d0f3452442d60383e15b84cdac050673d6d 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -750,8 +750,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) continue; err = gfar_parse_group(child, priv, model); - if (err) + if (err) { + of_node_put(child); goto err_grp_init; + } } } else { /* SQ_SG_MODE */ err = gfar_parse_group(np, priv, model); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index db791f60b884621cb0497e56d21ebd837f45daa6..714b501be7d09f805057f21222d8086900c53dc9 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1348,7 +1348,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) switch (ugeth->max_speed) { case SPEED_10: upsmr |= UCC_GETH_UPSMR_R10M; - /* FALLTHROUGH */ + fallthrough; case SPEED_100: if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) upsmr |= UCC_GETH_UPSMR_RMM; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 23f278e46975bf4b58668af1c35567584507422f..22522f8a5299984d60598a680909e13ca25bd7b3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -2282,8 +2282,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->enet_ver = AE_VERSION_1; else if (acpi_dev_found(hns_enet_acpi_match[1].id)) priv->enet_ver = AE_VERSION_2; - else - return -ENXIO; + else { + ret = -ENXIO; + goto out_read_prop_fail; + } /* try to find port-idx-in-ae first */ ret = acpi_node_get_property_reference(dev->fwnode, @@ -2299,7 +2301,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->fwnode = args.fwnode; } else { dev_err(dev, "cannot read cfg data from OF or acpi\n"); - return -ENXIO; + ret 
= -ENXIO; + goto out_read_prop_fail; } ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 49624acf24739a085bd75f8d548979dd63660fe4..4eb50296f6538b3bfd6bf635eb5bb2197469d18d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -305,7 +305,7 @@ static int __lb_setup(struct net_device *ndev, break; case MAC_LOOP_PHY_NONE: ret = hns_nic_config_phy_loopback(phy_dev, 0x0); - /* fall through */ + fallthrough; case MAC_LOOP_NONE: if (!ret && h->dev->ops->set_loopback) { if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 87776ce3539b487523965c3fe549ff57f1aa2aaf..a4f1d515e5e09b6ce151295f255af25d0048c1f8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -21,6 +21,7 @@ #include #include #include +#include <net/geneve.h> #include "hnae3.h" #include "hns3_enet.h" @@ -780,7 +781,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, * and it is udp packet, which has a dest port as the IANA assigned. * the hardware is expected to do the checksum offload, but the * hardware will not do the checksum offload when udp dest port is - * 4789. + * 4789 or 6081. */ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) { @@ -789,7 +790,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) l4.hdr = skb_transport_header(skb); if (!(!skb->encapsulation && - l4.udp->dest == htons(IANA_VXLAN_UDP_PORT))) + (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || + l4.udp->dest == htons(GENEVE_UDP_PORT)))) return false; skb_checksum_help(skb); @@ -2746,7 +2748,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, case HNS3_OL4_TYPE_MAC_IN_UDP: case HNS3_OL4_TYPE_NVGRE: skb->csum_level = 1; - /* fall through */ + fallthrough; case HNS3_OL4_TYPE_NO_TUN: l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 36575e72a915e86de79dd0495b3b3854f89825fa..d553ed7ee64c0aef5030f7ed550a23e41ac49a59 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3061,7 +3061,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) * by first decoding the types of errors.
*/ set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request); - /* fall through */ + fallthrough; case HCLGE_VECTOR0_EVENT_RST: hclge_reset_task_schedule(hdev); break; @@ -3686,12 +3686,10 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev) switch (hdev->reset_type) { case HNAE3_FUNC_RESET: - /* fall through */ case HNAE3_FLR_RESET: ret = hclge_set_all_vf_rst(hdev, false); break; case HNAE3_GLOBAL_RESET: - /* fall through */ case HNAE3_IMP_RESET: ret = hclge_set_rst_done(hdev); break; } diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 0273fb7a9d015eccc8879d8c6b1ff76a38e30496..3153d62cc73e38724a365ca58c461144124168d3 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3247,7 +3247,7 @@ static int ehea_mem_notifier(struct notifier_block *nb, switch (action) { case MEM_CANCEL_OFFLINE: pr_info("memory offlining canceled"); - /* Fall through - re-add canceled memory block */ + fallthrough; /* re-add canceled memory block */ case MEM_ONLINE: pr_info("memory is going online"); diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 06248a7db7f267afdad84c502952d7afc04b32b0..c00b9097eeea413755ead5d1f6e0949be72abe21 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2319,7 +2319,7 @@ static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCGMIIPHY: data->phy_id = dev->phy.address; - /* Fall through */ + fallthrough; case SIOCGMIIREG: data->val_out = emac_mdio_read(ndev, dev->phy.address, data->reg_num); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5afb3c9c52d20a44d1cef41b0b1465e51269e9fe..d3a774331afc7a71eab32652f214b4c3751f8cd5 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -479,6 +479,9 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter) int i, j, rc; u64 *size_array; + if (!adapter->rx_pool) + return -1; + size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); @@ -649,6 +652,9 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter) int tx_scrqs; int i, rc; + if (!adapter->tx_pool) + return -1; + tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); for (i = 0; i < tx_scrqs; i++) { rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); @@ -2011,7 +2017,10 @@ static int do_reset(struct ibmvnic_adapter *adapter, adapter->req_rx_add_entries_per_subcrq != old_num_rx_slots || adapter->req_tx_entries_per_subcrq != - old_num_tx_slots) { + old_num_tx_slots || + !adapter->rx_pool || + !adapter->tso_pool || + !adapter->tx_pool) { release_rx_pools(adapter); release_tx_pools(adapter); release_napi(adapter); @@ -2024,10 +2033,16 @@ static int do_reset(struct ibmvnic_adapter *adapter, } else { rc = reset_tx_pools(adapter); - if (rc) + if (rc) { + netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", + rc); goto out; + } rc = reset_rx_pools(adapter); - if (rc) + if (rc) { + netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", + rc); goto out; + } } ibmvnic_disable_irqs(adapter); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 63dde3bcf5bc7991b8e90a593617fdd811f9126e..664e8ccc88d22df6c891c819b6da9c6bc3a383c0 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4079,7 +4079,6 @@ void e1000e_reset(struct
e1000_adapter *adapter) case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: - fallthrough; case e1000_pch_tgp: case e1000_pch_adp: fc->refresh_time = 0xFFFF; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index a62ddd626929cf790031f11ab37d226ec926ef40..c0c8efe42fcec9c578b706464be4b2abdf9ea68d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -981,7 +981,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 -#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 +#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000 __le16 seid; __le16 vlan_tag; #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index afad5e9f80e027b28b30854915673178c8e9d8d5..6ab52cbd697a83319573040ec5ac9de096560687 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1966,6 +1966,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, return status; } +/** + * i40e_is_aq_api_ver_ge + * @aq: pointer to AdminQ info containing HW API version to compare + * @maj: API major value + * @min: API minor value + * + * Check whether the current HW API version is greater than or equal to the version provided. + **/ +static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, + u16 min) +{ + return (aq->api_maj_ver > maj || + (aq->api_maj_ver == maj && aq->api_min_ver >= min)); +} + /** * i40e_aq_add_vsi * @hw: pointer to the hw struct @@ -2091,18 +2106,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; - if (rx_only_promisc && - (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1))) - flags |= I40E_AQC_SET_VSI_PROMISC_TX; + if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); - if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1)) - cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + cmd->valid_flags |= + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -2199,11 +2212,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); - if (enable) + if (enable) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; + } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + cmd->valid_flags |= + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); cmd->seid = cpu_to_le16(seid); cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b5399357a66797644f129b9b01a5d95770a8708f..2e433fdbf2c36659fe3706ef3872a9bea5449a91 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++
b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -15463,6 +15463,9 @@ static void i40e_remove(struct pci_dev *pdev) i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); + while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) + usleep_range(1000, 2000); + /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 4f05f6efe6af168f53fbe40db6afef6f96f19725..d9c3a6b169f90f349ec044f568f90a48fb924b6d 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -718,7 +718,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) case e1000_i354: case e1000_i210: case e1000_i211: - fallthrough; default: for (; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + i; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 7a6f2a0d413ff3004972c536c07049880c4216fb..9593aa4eea369e78d37d137ed5db7c3a13c7f9ed 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -5142,6 +5142,8 @@ static int igc_probe(struct pci_dev *pdev, device_set_wakeup_enable(&adapter->pdev->dev, adapter->flags & IGC_FLAG_WOL_SUPPORTED); + igc_ptp_init(adapter); + /* reset the hardware with the new settings */ igc_reset(adapter); @@ -5158,9 +5160,6 @@ static int igc_probe(struct pci_dev *pdev, /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); - /* do hw tstamp init after resetting */ - igc_ptp_init(adapter); - /* Check if Media Autosense is enabled */ adapter->ei = *ei; diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index e67d4655b47e663ee99b9276d3de2c296fcbd423..36c999250fcc21168d9f3446187d95c7c0c6991d 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -496,8 +496,6 @@ void igc_ptp_init(struct igc_adapter *adapter) adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; - igc_ptp_reset(adapter); - adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index e67b1a59ecb7c43f11d54af3831787cfb57daf88..0fcd82036d4e3fc110dccfad0c23c11430388d91 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -193,7 +193,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, } /* alloc the udl from per cpu ddp pool */ - ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp); + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); if (!ddp->udl) { e_err(drv, "failed allocated ddp context\n"); goto out_noddp_unmap; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 832bbb8b05c80d814f8d3dee7c7c270a54d2e0f7..dfcb1767acbb307ea820f8d11f3375807861dcfb 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2205,10 +2205,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(pp->dev, prog, act); - 
/* fall through */ + fallthrough; case XDP_DROP: mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); ret = MVNETA_XDP_DROPPED; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index d4a4e241333d8a120b4f14d339f2ae3dba22c8d4..41d935d1aaf6ff075e51b13e065d565a2ced4ae7 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1638,7 +1638,7 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; if (info->data & RXH_L4_B_2_3) hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; - /* Fallthrough */ + fallthrough; case MVPP22_FLOW_IP4: case MVPP22_FLOW_IP6: if (info->data & RXH_L2DA) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 2a8a5842eaefdf8d2d38795a3f9c26fb723a8ecd..6e140d1b8967cac0a7b05e8138d96227f3fac6fa 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5437,7 +5437,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config, } if (state->interface != PHY_INTERFACE_MODE_NA) break; - /* Fall-through */ + fallthrough; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: @@ -5451,7 +5451,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config, phylink_set(mask, 1000baseX_Full); if (state->interface != PHY_INTERFACE_MODE_NA) break; - /* Fall-through */ + fallthrough; case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_2500BASEX: if (port->comphy || diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 36953d4f51c73af7a710e1397a8431e5ab863a6a..01a793105599fbfcc78b5867d13b1442f1f578b5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -737,7 +737,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(mask, &req->mce_mask, sizeof(struct nix_rx_mce_s)); - /* Fall through */ + fallthrough; case NIX_AQ_INSTOP_INIT: if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 5975521a4c8614d84f638856dda266071a27180b..93c4cf7fedbfa960cac7c8b39c44633eb58ab7f4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -1226,8 +1226,8 @@ int otx2_config_npa(struct otx2_nic *pfvf) if (!hw->pool_cnt) return -EINVAL; - qset->pool = devm_kzalloc(pfvf->dev, sizeof(struct otx2_pool) * - hw->pool_cnt, GFP_KERNEL); + qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt, + sizeof(struct otx2_pool), GFP_KERNEL); if (!qset->pool) return -ENOMEM; diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index b792f6306a641448b91edcda45a77ba24f75d498..6a930351cb239d4b330076d2446cb62f674d6f74 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -2448,7 +2448,7 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = hw->phy_addr; - /* fallthru */ + fallthrough; case SIOCGMIIREG: { u16 val = 0; spin_lock_bh(&hw->phy_lock); diff --git a/drivers/net/ethernet/marvell/sky2.c 
b/drivers/net/ethernet/marvell/sky2.c index cec8124301c7f5683c41be085e0de9832f6dc23e..344864275ed56a6a323a19645f8a13d62b39db02 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1376,7 +1376,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = PHY_ADDR_MARV; - /* fallthru */ + fallthrough; case SIOCGMIIREG: { u16 val = 0; @@ -2764,7 +2764,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) case OP_RXCHKSVLAN: sky2_rx_tag(sky2, length); - /* fall through */ + fallthrough; case OP_RXCHKS: if (likely(dev->features & NETIF_F_RXCSUM)) sky2_rx_checksum(sky2, status); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 0870fe78ea38c100c578a38800c0b079c2432860..6d2d60675ffd7d293a6a7f7f8283f6a08342fd38 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -228,7 +228,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, if (!MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) goto err_phy; - /* fall through */ + fallthrough; case PHY_INTERFACE_MODE_RGMII_TXID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_ID: @@ -501,11 +501,11 @@ static void mtk_validate(struct phylink_config *config, case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: phylink_set(mask, 1000baseT_Half); - /* fall through */ + fallthrough; case PHY_INTERFACE_MODE_SGMII: phylink_set(mask, 1000baseT_Full); phylink_set(mask, 1000baseX_Full); - /* fall through */ + fallthrough; case PHY_INTERFACE_MODE_MII: case PHY_INTERFACE_MODE_RMII: case PHY_INTERFACE_MODE_REVMII: diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index d2986f1f2db02a9380e1184057d114295dd2d60d..d7444782bfdd02863faa615a144c205b9b79666f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { - s = BITS_TO_LONGS(1 << (buddy->max_order - i)); + s = BITS_TO_LONGS(1UL << (buddy->max_order - i)); buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO); if (!buddy->bits[i]) goto err_out_free; diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c index 7a04c626a2aa50efed43785074494598d826b7cc..bcd166911d444b13dcbe7b390073368116a1ac34 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c @@ -72,7 +72,7 @@ static int mlxfw_fsm_state_err(struct mlxfw_dev *mlxfw_dev, case MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET: MLXFW_ERR_MSG(mlxfw_dev, extack, "pending reset", err); break; - case MLXFW_FSM_STATE_ERR_OK: /* fall through */ + case MLXFW_FSM_STATE_ERR_OK: case MLXFW_FSM_STATE_ERR_MAX: MLXFW_ERR_MSG(mlxfw_dev, extack, "unknown error", err); break; @@ -155,7 +155,7 @@ mlxfw_fsm_reactivate_err(struct mlxfw_dev *mlxfw_dev, case MLXFW_FSM_REACTIVATE_STATUS_FW_ALREADY_ACTIVATED: MLXFW_REACT_ERR("fw already activated", err); break; - case MLXFW_FSM_REACTIVATE_STATUS_OK: /* fall through */ + case MLXFW_FSM_REACTIVATE_STATUS_OK: case MLXFW_FSM_REACTIVATE_STATUS_MAX: MLXFW_REACT_ERR("unexpected error", err); break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 
08d101138fbe177ffc55d9df91b9f932c36b934c..ec45a03140d7f4e969be6fd43041109806ecf1c0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -2289,21 +2289,21 @@ int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module) /* Here we need to get the module width according to the module type. */ switch (module_type) { - case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X: /* fall through */ - case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD: /* fall through */ + case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X: + case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD: case MLXSW_REG_PMTM_MODULE_TYPE_OSFP: return 8; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X: /* fall through */ - case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: /* fall through */ + case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X: + case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: case MLXSW_REG_PMTM_MODULE_TYPE_QSFP: return 4; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X: /* fall through */ - case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X: /* fall through */ - case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD: /* fall through */ + case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X: + case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X: + case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD: case MLXSW_REG_PMTM_MODULE_TYPE_DSFP: return 2; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X: /* fall through */ - case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X: /* fall through */ + case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X: + case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X: case MLXSW_REG_PMTM_MODULE_TYPE_SFP: return 1; default: diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 44fa02cbb683db24cc4115030586d66c37de3306..056eeb85be6047f87a40bfb74a5965e1d1f17aab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -30,8 +30,8 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP: *qsfp = false; break; - case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: /* fall-through */ - case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28: *qsfp = true; break; @@ -205,7 +205,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module, modinfo->type = ETH_MODULE_SFF_8436; modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; break; - case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28: if (module_id == MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 || module_rev_id >= diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 3fe878d7c94cb3edf5c5fb59f0a3024ecf417df0..61719ec89808f8fbf80d813501439727c57cc845 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -259,8 +259,8 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev, */ fault = 1; break; - case MLXSW_REG_MTBR_NO_CONN: /* fall-through */ - case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */ + case MLXSW_REG_MTBR_NO_CONN: + case MLXSW_REG_MTBR_NO_TEMP_SENS: case MLXSW_REG_MTBR_INDEX_NA: default: fault = 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 
fdf9aa8314b29933b0739edab74a9751e1ed87b5..4186e29119c27d134ee867ddb86cca418f2d7d95 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -517,8 +517,8 @@ enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) return MLXSW_REG_SPMS_STATE_FORWARDING; case BR_STATE_LEARNING: return MLXSW_REG_SPMS_STATE_LEARNING; - case BR_STATE_LISTENING: /* fall-through */ - case BR_STATE_DISABLED: /* fall-through */ + case BR_STATE_LISTENING: + case BR_STATE_DISABLED: case BR_STATE_BLOCKING: return MLXSW_REG_SPMS_STATE_DISCARDING; default: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index f9ba59641b4df93dc1f7c495a098b879e502a7a3..5240bf11b6c42d310b9e660cdd40ac3546a000a8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -636,11 +636,11 @@ static inline unsigned int mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type) { switch (type) { - case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */ - case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */ - case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */ - case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */ - case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: + case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: + case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: + case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: + case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: default: return 1; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 0521e9d48c457df8c3b1a5469e176cd4c07b45aa..24f1fd1f8d5619ecb757c31355fb8a77f0358917 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1164,7 +1164,7 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id, addr_len = 4; addr_prefix_len = 32; break; - case MLXSW_SP_L3_PROTO_IPV6: /* fall through */ + case MLXSW_SP_L3_PROTO_IPV6: default: WARN_ON(1); return NULL; @@ -4555,14 +4555,14 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP; return 0; } - /* fall through */ + fallthrough; case RTN_BROADCAST: fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; return 0; case RTN_BLACKHOLE: fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; return 0; - case RTN_UNREACHABLE: /* fall through */ + case RTN_UNREACHABLE: case RTN_PROHIBIT: /* Packets hitting these routes need to be trapped, but * can do so with a lower priority than packets directed @@ -5990,7 +5990,7 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work) mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info); fib_info_put(fib_work->fen_info.fi); break; - case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_ADD: case FIB_EVENT_NH_DEL: mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event, fib_work->fnh_info.fib_nh); @@ -6050,7 +6050,7 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work) rtnl_lock(); mutex_lock(&mlxsw_sp->router->lock); switch (fib_work->event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_ADD: replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; @@ -6089,7 +6089,7 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, struct fib_nh_notifier_info *fnh_info; switch (fib_work->event) { - case 
FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_DEL: fen_info = container_of(info, struct fib_entry_notifier_info, info); @@ -6099,7 +6099,7 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, */ fib_info_hold(fib_work->fen_info.fi); break; - case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_ADD: case FIB_EVENT_NH_DEL: fnh_info = container_of(info, struct fib_nh_notifier_info, info); @@ -6116,8 +6116,8 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, int err; switch (fib_work->event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ - case FIB_EVENT_ENTRY_APPEND: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: + case FIB_EVENT_ENTRY_APPEND: case FIB_EVENT_ENTRY_DEL: fen6_info = container_of(info, struct fib6_entry_notifier_info, info); @@ -6136,13 +6136,13 @@ mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work, struct fib_notifier_info *info) { switch (fib_work->event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ - case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: + case FIB_EVENT_ENTRY_ADD: case FIB_EVENT_ENTRY_DEL: memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info)); mr_cache_hold(fib_work->men_info.mfc); break; - case FIB_EVENT_VIF_ADD: /* fall through */ + case FIB_EVENT_VIF_ADD: case FIB_EVENT_VIF_DEL: memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info)); dev_hold(fib_work->ven_info.dev); @@ -6215,13 +6215,13 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, router = container_of(nb, struct mlxsw_sp_router, fib_nb); switch (event) { - case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_ADD: case FIB_EVENT_RULE_DEL: err = mlxsw_sp_router_fib_rule_event(event, info, router->mlxsw_sp); return notifier_from_errno(err); - case FIB_EVENT_ENTRY_ADD: /* fall through */ - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_ADD: + case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_APPEND: if (router->aborted) { NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. 
Not configuring route"); @@ -7277,7 +7277,7 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, goto out; switch (event) { - case NETDEV_CHANGEMTU: /* fall through */ + case NETDEV_CHANGEMTU: case NETDEV_CHANGEADDR: err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif); break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 5c959a995199619fbcab695f02055abe8457bb6d..1d18e41ab2553815c31e0952a91fdc739a9c2779 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -1523,12 +1523,12 @@ mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry) enum mlxsw_sp_span_trigger_type type; switch (trigger_entry->trigger) { - case MLXSW_SP_SPAN_TRIGGER_INGRESS: /* fall-through */ + case MLXSW_SP_SPAN_TRIGGER_INGRESS: case MLXSW_SP_SPAN_TRIGGER_EGRESS: type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT; break; - case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: /* fall-through */ - case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: /* fall-through */ + case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: + case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: case MLXSW_SP_SPAN_TRIGGER_ECN: type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL; break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index a26162b08b7d8de47bb562cc5d2d6321788cb51a..72912afa6f72a2637653b53144fafcb239c2d0b6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1297,7 +1297,7 @@ static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp, uip = be32_to_cpu(addr->addr4); sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4; break; - case MLXSW_SP_L3_PROTO_IPV6: /* fall through */ + case MLXSW_SP_L3_PROTO_IPV6: default: WARN_ON(1); return -EOPNOTSUPP; @@ -2870,7 +2870,7 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work) fdb_info = &switchdev_work->fdb_info; mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); break; - case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ + case SWITCHDEV_FDB_ADD_TO_BRIDGE: case SWITCHDEV_FDB_DEL_TO_BRIDGE: /* These events are only used to potentially update an existing * SPAN mirror. 
@@ -3116,9 +3116,9 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, switchdev_work->event = event; switch (event) { - case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ - case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */ - case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + case SWITCHDEV_FDB_ADD_TO_BRIDGE: case SWITCHDEV_FDB_DEL_TO_BRIDGE: fdb_info = container_of(info, struct switchdev_notifier_fdb_info, @@ -3138,7 +3138,7 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, */ dev_hold(dev); break; - case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */ + case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE: INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_vxlan_fdb_event_work); diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index c533d06fbe3a599e40c4d51824f6b474eb0408d8..dcde496da7fb4b6c7a79f723d99a733e9493d49f 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -548,7 +548,7 @@ static int lan743x_ethtool_get_rxnfc(struct net_device *netdev, case TCP_V4_FLOW:case UDP_V4_FLOW: case TCP_V6_FLOW:case UDP_V6_FLOW: rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ + fallthrough; case IPV4_FLOW: case IPV6_FLOW: rxnfc->data |= RXH_IP_SRC | RXH_IP_DST; return 0; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 867c680f5917787388083081b349d47740a3c215..5abb7d2b0a9e008f8c08e5a188400f2d62115a7b 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -859,7 +859,7 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) switch (state) { case BR_STATE_FORWARDING: ocelot->bridge_fwd_mask |= BIT(port); - /* Fallthrough */ + fallthrough; case BR_STATE_LEARNING: port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA; break; diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index c2867fe995bc5eb1f9690777e87f6fb78f6e31e3..3de8430ee8c5d15e32cbe0f5ec003cf6eb605e57 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -3081,7 +3081,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = np->phy_addr_external; - /* Fall Through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. 
*/ /* The phy_id is not enough to uniquely identify diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 4f1f90f5e178a6ed07bf0ec9451b863151fe69aa..78eba10300ae6d217a142d2cbb09972bbd1be2ce 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -3768,20 +3768,20 @@ vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1, VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA( itable[j]); - /* fall through */ + fallthrough; case 2: *data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)| VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN | VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA( itable[j]); - /* fall through */ + fallthrough; case 3: *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)| VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA( itable[j]); - /* fall through */ + fallthrough; case 4: *data1 |= VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)| diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c index 7c50e3dfb9d525d4b6ad349101baf01038754e2e..76c51da5b66f3191ff29191f6f148b1879c0f9ae 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c @@ -296,7 +296,7 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk, break; } #endif - /* fall through */ + fallthrough; case AF_INET: req_sz = sizeof(struct nfp_crypto_req_add_v4); ipv6 = false; diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index ff844e5cc41fb4741b5ac2dadca26a4de35ef9d3..1cbe2c9f3959f9859915b9ad53508d4e709cb6a8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -297,7 +297,7 @@ nfp_fl_get_tun_from_act(struct nfp_app *app, case htons(GENEVE_UDP_PORT): if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE) return NFP_FL_TUNNEL_GENEVE; - /* FALLTHROUGH */ + fallthrough; default: return NFP_FL_TUNNEL_NONE; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index a050cb898782849c1ad18a91e1b89396b045aa8f..f21cf1f40f98734cd2bd44835cba017cc0c3a67e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -289,7 +289,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) skb_stored = nfp_flower_lag_unprocessed_msg(app, skb); break; } - /* fall through */ + fallthrough; default: err_default: nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 4651fe417b7f87a0ed38930e47c86e56bf50a7c6..36356f96661d7b1dad93f9b0b156cae5c4a97740 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -784,7 +784,7 @@ nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len, case NFP_FL_ACTION_OPCODE_PRE_TUNNEL: if (tunnel_act) *tunnel_act = true; - /* fall through */ + fallthrough; case NFP_FL_ACTION_OPCODE_PRE_LAG: memcpy(act_dst + act_off, act_src + act_off, act_len); break; diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 
b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 2df3deedf9fd85bd4de664e333a468eb923a499a..7248d248f60416f18b7263622b2866da8627e410 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -61,6 +61,7 @@ struct nfp_tun_active_tuns { * @flags: options part of the request * @tun_info.ipv6: dest IPv6 address of active route * @tun_info.egress_port: port the encapsulated packet egressed + * @tun_info.extra: reserved for future use * @tun_info: tunnels that have sent traffic in reported period */ struct nfp_tun_active_tuns_v6 { @@ -70,6 +71,7 @@ struct nfp_tun_active_tuns_v6 { struct route_ip_info_v6 { struct in6_addr ipv6; __be32 egress_port; + __be32 extra[2]; } tun_info[]; }; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index b04b83687fe219d98b8a9e1c2b21d1970ac409e0..2643ea5948f48f0d94fb5b83a6ce364b1932c3c1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -137,7 +137,7 @@ static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst) val; case NN_LM_MOD_DEC: lm_dec = true; - /* fall through */ + fallthrough; case NN_LM_MOD_INC: if (val) { pr_err("LM offset in inc/dev mode\n"); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 39ee23e8c0bfd6e8b41d5402449cbfa0233027bf..21ea22694e479a53f8d5441b0c4f93e19e349f97 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1940,10 +1940,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) continue; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(dp->netdev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index a486008eb80a9d2db26ceadb1ada187569bdc6c8..252fe06f58aacd1c9d08e87d88517bbb04537f48 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -340,12 +340,12 @@ static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok, switch (maptype) { case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET: bartok = -1; - /* FALLTHROUGH */ + fallthrough; case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK: baract = NFP_CPP_ACTION_RW; if (act == 0) act = NFP_CPP_ACTION_RW; - /* FALLTHROUGH */ + fallthrough; case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED: break; default: diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 75f012444796d31a06923f4f09fd18d41c65665c..2260c2403a83a177df578bfe5bcad47981cc339f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -213,7 +213,7 @@ u64 nfp_rtsym_size(const struct nfp_rtsym *sym) return 0; default: pr_warn("rtsym '%s': unknown type: %d\n", sym->name, sym->type); - /* fall through */ + fallthrough; case NFP_RTSYM_TYPE_OBJECT: case NFP_RTSYM_TYPE_FUNCTION: return sym->size; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c index 
a26966fa40b93f8ad3095575f54b7d7e41849c9b..dceec80fd64227b278a20ecd0d3fa912f7efc440 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c @@ -410,7 +410,7 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) case SPEED_1000 + HALF_DUPLEX: netdev_dbg(adapter->netdev, "Half Duplex is not supported at 1000 Mbps\n"); - /* fall through */ + fallthrough; case SPEED_1000 + FULL_DUPLEX: full_duplex_only: netdev_dbg(adapter->netdev, diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index 647a1431b35915d5de0489026e982d0fdd1266bb..3da0753071788faa4de65e289b745da53bbdd747 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -1356,7 +1356,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = np->phys[0] & 0x1f; - /* Fall Through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 8107d32c27673a2b8b8aa015a9198668ec040dfe..def65fee27b5a66a0c577f7d27686854d53d3ac0 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -496,9 +496,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) struct ionic_cq *txcq; u32 rx_work_done = 0; u32 tx_work_done = 0; - u32 work_done = 0; u32 flags = 0; - bool unmask; lif = rxcq->bound_q->lif; idev = &lif->ionic->idev; @@ -512,17 +510,12 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) if (rx_work_done) ionic_rx_fill_cb(rxcq->bound_q); - unmask = (rx_work_done < budget) && (tx_work_done < lif->tx_budget); - - if (unmask && napi_complete_done(napi, rx_work_done)) { + if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) { flags |= IONIC_INTR_CRED_UNMASK; DEBUG_STATS_INTR_REARM(rxcq->bound_intr); - work_done = rx_work_done; - } else { - work_done = budget; } - if (work_done || flags) { + if (rx_work_done || flags) { flags |= IONIC_INTR_CRED_RESET_COALESCE; ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index, tx_work_done + rx_work_done, flags); @@ -531,7 +524,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) DEBUG_STATS_NAPI_POLL(qcq, rx_work_done); DEBUG_STATS_NAPI_POLL(qcq, tx_work_done); - return work_done; + return rx_work_done; } static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index 66f45fce90fa8782333bd6fa646d4b4e691760b4..c3f50ddbe824148ead6fde0ed03eb820b2bc2ebd 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -153,7 +153,7 @@ netxen_nic_get_link_ksettings(struct net_device *dev, case NETXEN_BRDTYPE_P3_4_GB_MM: supported |= SUPPORTED_Autoneg; advertising |= ADVERTISED_Autoneg; - /* fall through */ + fallthrough; case NETXEN_BRDTYPE_P2_SB31_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4_LP: @@ -182,7 +182,7 @@ netxen_nic_get_link_ksettings(struct net_device *dev, supported |= SUPPORTED_TP; check_sfp_module = netif_running(dev) && adapter->has_link_events; - /* fall through */ + 
fallthrough; case NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P3_10G_XFP: supported |= SUPPORTED_FIBRE; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 876743a79c1f5034e983ac6a68711c1ed14825b1..0e4cd8890cffc8d4dc303d852f38914ef356424f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2046,7 +2046,7 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks) rdma_tasks); /* no need for break since RoCE coexist with Ethernet */ } - /* fall through */ + fallthrough; case QED_PCI_ETH: { struct qed_eth_pf_params *p_params = diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index b3c9ebaf228077e618f7af9ad49f07307d97e63c..b8f076e4e6b87704cc60bfbdd073bb94b8066ef8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3109,14 +3109,14 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) p_hwfn->hw_info.hw_mode); if (rc) break; - /* Fall through */ + fallthrough; case FW_MSG_CODE_DRV_LOAD_PORT: rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->hw_info.hw_mode); if (rc) break; - /* Fall through */ + fallthrough; case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, p_params->p_tunn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 2558cb680db3159a8836dfe31016955cc3ba7c2a..f39f629242a11bbc1d57db4799d4ab6c19302c4d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -761,7 +761,7 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) kfree(int_params->msix_table); if (force_mode) goto out; - /* Fallthrough */ + fallthrough; case QED_INT_MODE_MSI: if (cdev->num_hwfns == 1) { @@ -775,7 +775,7 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) if (force_mode) goto out; } - /* Fallthrough */ + fallthrough; case QED_INT_MODE_INTA: int_params->out.int_mode = QED_INT_MODE_INTA; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 5be08f83e0aa682bb3053110e56a9653933f65b7..cd882c4533942966ca87b2c6ff23928b34b710c7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1085,7 +1085,7 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_NOTICE(p_hwfn, "Unknown WoL configuration %02x\n", p_hwfn->cdev->wol_config); - /* Fallthrough */ + fallthrough; case QED_OV_WOL_DEFAULT: wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; } @@ -1365,7 +1365,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, break; case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: p_link->full_duplex = false; - /* Fall-through */ + fallthrough; case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: p_link->speed = 1000; break; @@ -2451,7 +2451,7 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, break; case FUNC_MF_CFG_PROTOCOL_ROCE: DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); - /* Fallthrough */ + fallthrough; default: rc = -EINVAL; } @@ -3546,7 +3546,7 @@ qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn, switch (p_in_params->cmd) { case DRV_MSG_SET_RESOURCE_VALUE_MSG: mfw_resc_info.size = p_in_params->resc_max_val; - /* Fallthrough */ + fallthrough; case DRV_MSG_GET_RESOURCE_ALLOC_MSG: break; default: @@ -3823,7 +3823,7 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, 
DP_INFO(p_hwfn, "Resource unlock request for an already released resource [%d]\n", p_params->resource); - /* Fallthrough */ + fallthrough; case RESOURCE_OPCODE_RELEASED: p_params->b_released = true; break; diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 0d0e38debbc26b84180452a634aba6b76486ccfa..569e2a7a64e542b6303b09bae33e457c608546cc 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -1542,7 +1542,7 @@ static void ql_link_state_machine_work(struct work_struct *work) if (test_bit(QL_LINK_MASTER, &qdev->flags)) ql_port_start(qdev); qdev->port_link_state = LS_DOWN; - /* Fall Through */ + fallthrough; case LS_DOWN: if (curr_link_state == LS_UP) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 5c2a3acf1e89b10646de3e5116c845108676ddd9..b9894d54469c498aeb9a8a12b4cb840096a2abee 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -353,7 +353,7 @@ static int qlcnic_82xx_get_link_ksettings(struct qlcnic_adapter *adapter, case QLCNIC_BRDTYPE_P3P_4_GB_MM: supported |= SUPPORTED_Autoneg; advertising |= ADVERTISED_Autoneg; - /* fall through */ + fallthrough; case QLCNIC_BRDTYPE_P3P_10G_CX4: case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: case QLCNIC_BRDTYPE_P3P_10000_BASE_T: @@ -377,7 +377,7 @@ static int qlcnic_82xx_get_link_ksettings(struct qlcnic_adapter *adapter, supported |= SUPPORTED_TP; check_sfp_module = netif_running(adapter->netdev) && ahw->has_link_events; - /* fall through */ + fallthrough; case QLCNIC_BRDTYPE_P3P_10G_XFP: supported |= SUPPORTED_FIBRE; advertising |= ADVERTISED_FIBRE; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index d1da92ac7fbec8b593d35bd7dd5abc09182c098d..fc9e6626db558e47b7dfca524118548553f44da6 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4994,7 +4994,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) rtl_unlock_config_regs(tp); RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); rtl_lock_config_regs(tp); - /* fall through */ + fallthrough; case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17: flags = PCI_IRQ_LEGACY; break; @@ -5137,7 +5137,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52: rtl8168ep_stop_cmac(tp); - /* fall through */ + fallthrough; case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 99f7aae102ce12a15155c84d888af04229e80c85..df89d09b253e24e2fdc6a87a0d78f8e332735117 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1342,6 +1342,51 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, return error; } +/* MDIO bus init function */ +static int ravb_mdio_init(struct ravb_private *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + int error; + + /* Bitbang init */ + priv->mdiobb.ops = &bb_ops; + + /* MII controller setting */ + priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); + if (!priv->mii_bus) + return -ENOMEM; + + /* Hook up MII support for ethtool */ + priv->mii_bus->name = "ravb_mii"; + priv->mii_bus->parent = dev; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* Register MDIO bus */ + error = of_mdiobus_register(priv->mii_bus, dev->of_node); + if (error) + goto out_free_bus; + + return 0; + +out_free_bus: + free_mdio_bitbang(priv->mii_bus); + return error; +} + +/* MDIO bus release function */ +static int ravb_mdio_release(struct ravb_private *priv) +{ + /* Unregister mdio bus */ + mdiobus_unregister(priv->mii_bus); + + /* Free bitbang info */ + free_mdio_bitbang(priv->mii_bus); + + return 0; +} + /* Network device open function for Ethernet AVB */ static int ravb_open(struct net_device *ndev) { @@ -1350,6 +1395,13 @@ static int ravb_open(struct net_device *ndev) struct device *dev = &pdev->dev; int error; + /* MDIO bus init */ + error = ravb_mdio_init(priv); + if (error) { + netdev_err(ndev, "failed to initialize MDIO\n"); + return error; + } + napi_enable(&priv->napi[RAVB_BE]); napi_enable(&priv->napi[RAVB_NC]); @@ -1427,6 +1479,7 @@ static int ravb_open(struct net_device *ndev) out_napi_off: napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); return error; } @@ -1736,6 +1789,8 @@ static int ravb_close(struct net_device *ndev) ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC); + ravb_mdio_release(priv); + return 0; } @@ -1887,51 +1942,6 @@ static const struct net_device_ops ravb_netdev_ops = { .ndo_set_features = ravb_set_features, }; -/* MDIO bus init function */ -static int ravb_mdio_init(struct ravb_private *priv) -{ - struct platform_device *pdev = priv->pdev; - struct device *dev = &pdev->dev; - int error; - - /* Bitbang init */ - priv->mdiobb.ops = &bb_ops; - - /* MII controller setting */ - priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); - if (!priv->mii_bus) - return -ENOMEM; - - /* Hook up MII support for ethtool */ - priv->mii_bus->name = "ravb_mii"; - priv->mii_bus->parent = dev; - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); - - /* Register MDIO bus */ - error = of_mdiobus_register(priv->mii_bus, dev->of_node); - if (error) - goto out_free_bus; - - return 0; - -out_free_bus: - free_mdio_bitbang(priv->mii_bus); - return error; -} - -/* MDIO bus release function */ -static int ravb_mdio_release(struct ravb_private *priv) -{ - /* Unregister mdio bus */ - mdiobus_unregister(priv->mii_bus); - - /* Free bitbang info */ - free_mdio_bitbang(priv->mii_bus); - - return 0; -} - static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,etheravb-r8a7794", .data = (void 
*)RCAR_GEN2 }, @@ -2174,13 +2184,6 @@ static int ravb_probe(struct platform_device *pdev) eth_hw_addr_random(ndev); } - /* MDIO bus init */ - error = ravb_mdio_init(priv); - if (error) { - dev_err(&pdev->dev, "failed to initialize MDIO\n"); - goto out_dma_free; - } - netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); @@ -2202,8 +2205,6 @@ static int ravb_probe(struct platform_device *pdev) out_napi_del: netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); -out_dma_free: dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); @@ -2235,7 +2236,6 @@ static int ravb_remove(struct platform_device *pdev) unregister_netdev(ndev); netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); pm_runtime_disable(&pdev->dev); free_netdev(ndev); platform_set_drvdata(pdev, NULL); diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index fc99e7118e494d48350f79e39ce610bb95451a2b..42458a46ffaf6e0d0494a98d00e8d7cb7ba90571 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2169,7 +2169,7 @@ static void rocker_router_fib_event_work(struct work_struct *work) rocker_world_fib4_del(rocker, &fib_work->fen_info); fib_info_put(fib_work->fen_info.fi); break; - case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_ADD: case FIB_EVENT_RULE_DEL: rule = fib_work->fr_info.rule; if (!fib4_rule_default(rule)) @@ -2201,7 +2201,7 @@ static int rocker_router_fib_event(struct notifier_block *nb, fib_work->event = event; switch (event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_DEL: if (info->family == AF_INET) { struct fib_entry_notifier_info *fen_info = ptr; @@ -2224,7 +2224,7 @@ static int rocker_router_fib_event(struct notifier_block *nb, */ fib_info_hold(fib_work->fen_info.fi); break; - case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_ADD: case FIB_EVENT_RULE_DEL: memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info)); fib_rule_get(fib_work->fr_info.rule); @@ -2811,7 +2811,7 @@ static int rocker_switchdev_event(struct notifier_block *unused, switchdev_work->event = event; switch (event) { - case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ + case SWITCHDEV_FDB_ADD_TO_DEVICE: case SWITCHDEV_FDB_DEL_TO_DEVICE: memcpy(&switchdev_work->fdb_info, ptr, sizeof(switchdev_work->fdb_info)); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c index 21465cb3d60ab09452c13c45ba8a606c684ad297..7f8b10c4966073c13849ea51a5c5511a929aeb88 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c @@ -316,7 +316,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv, case TCP_V4_FLOW: case UDP_V4_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -327,7 +327,7 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv, case TCP_V6_FLOW: case UDP_V6_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c index 
9729983f48409d3cc2dd108813c14847b5634eb9..c54b7f8243f3ba9c03c8c50d30dc4fb6a6d9276e 100644 --- a/drivers/net/ethernet/sfc/ef100.c +++ b/drivers/net/ethernet/sfc/ef100.c @@ -142,7 +142,7 @@ static int ef100_pci_parse_continue_entry(struct efx_nic *efx, int entry_locatio /* Temporarily map new BAR. */ rc = efx_init_io(efx, bar, - DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), + (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), pci_resource_len(efx->pci_dev, bar)); if (rc) { netif_err(efx, probe, efx->net_dev, @@ -160,7 +160,7 @@ static int ef100_pci_parse_continue_entry(struct efx_nic *efx, int entry_locatio /* Put old BAR back. */ rc = efx_init_io(efx, previous_bar, - DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), + (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), pci_resource_len(efx->pci_dev, previous_bar)); if (rc) { netif_err(efx, probe, efx->net_dev, @@ -334,7 +334,7 @@ static int ef100_pci_parse_xilinx_cap(struct efx_nic *efx, int vndr_cap, /* Temporarily map BAR. */ rc = efx_init_io(efx, bar, - DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), + (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), pci_resource_len(efx->pci_dev, bar)); if (rc) { netif_err(efx, probe, efx->net_dev, @@ -495,7 +495,7 @@ static int ef100_pci_probe(struct pci_dev *pci_dev, /* Set up basic I/O (BAR mappings etc) */ rc = efx_init_io(efx, fcw.bar, - DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), + (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), pci_resource_len(efx->pci_dev, fcw.bar)); if (rc) goto fail; diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 206d70f9d95b0ac0e6992733b655cf9a861cfe3b..19fe86b3b3169b0636ba698738c31d2031588926 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -431,18 +431,18 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type) /* A RESET_TYPE_ALL will cause filters to be removed, so we remove filters * and reprobe after reset to avoid removing filters twice */ - down_read(&efx->filter_sem); + down_write(&efx->filter_sem); ef100_filter_table_down(efx); - up_read(&efx->filter_sem); + up_write(&efx->filter_sem); rc = efx_mcdi_reset(efx, reset_type); if (rc) return rc; netif_device_attach(efx->net_dev); - down_read(&efx->filter_sem); + down_write(&efx->filter_sem); rc = ef100_filter_table_up(efx); - up_read(&efx->filter_sem); + up_write(&efx->filter_sem); if (rc) return rc; @@ -739,6 +739,7 @@ const struct efx_nic_type ef100_pf_nic_type = { .rx_remove = efx_mcdi_rx_remove, .rx_write = ef100_rx_write, .rx_packet = __ef100_rx_packet, + .rx_buf_hash_valid = ef100_rx_buf_hash_valid, .fini_dmaq = efx_fini_dmaq, .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, .filter_table_probe = ef100_filter_table_up, @@ -820,6 +821,7 @@ const struct efx_nic_type ef100_vf_nic_type = { .rx_remove = efx_mcdi_rx_remove, .rx_write = ef100_rx_write, .rx_packet = __ef100_rx_packet, + .rx_buf_hash_valid = ef100_rx_buf_hash_valid, .fini_dmaq = efx_fini_dmaq, .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, .filter_table_probe = ef100_filter_table_up, diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c index 13ba1a4f66fcabe2cd89e1b4649cabe7b019ba57..85207acf7dee94b0e3af315619d1820837d10f36 100644 --- a/drivers/net/ethernet/sfc/ef100_rx.c +++ b/drivers/net/ethernet/sfc/ef100_rx.c @@ -31,7 +31,12 @@ #define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH \ ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH -static bool check_fcs(struct efx_channel *channel, u32 *prefix) +bool 
ef100_rx_buf_hash_valid(const u8 *prefix) +{ + return PREFIX_FIELD(prefix, RSS_HASH_VALID); +} + +static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix) { u16 rxclass; u8 l2status; @@ -41,11 +46,11 @@ static bool check_fcs(struct efx_channel *channel, u32 *prefix) if (likely(l2status == ESE_GZ_RH_HCLASS_L2_STATUS_OK)) /* Everything is ok */ - return 0; + return false; if (l2status == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR) channel->n_rx_eth_crc_err++; - return 1; + return true; } void __ef100_rx_packet(struct efx_channel *channel) @@ -58,7 +63,7 @@ void __ef100_rx_packet(struct efx_channel *channel) prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN); - if (check_fcs(channel, prefix) && + if (ef100_has_fcs_error(channel, prefix) && unlikely(!(efx->net_dev->features & NETIF_F_RXALL))) goto out; diff --git a/drivers/net/ethernet/sfc/ef100_rx.h b/drivers/net/ethernet/sfc/ef100_rx.h index f2f266863966a6a612851cd7a81e993cfedad8d8..fe45b36458d1a7c04487447092d39f906238db00 100644 --- a/drivers/net/ethernet/sfc/ef100_rx.h +++ b/drivers/net/ethernet/sfc/ef100_rx.h @@ -14,6 +14,7 @@ #include "net_driver.h" +bool ef100_rx_buf_hash_valid(const u8 *prefix); void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event); void ef100_rx_write(struct efx_rx_queue *rx_queue); void __ef100_rx_packet(struct efx_channel *channel); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index a9808e86068d620dcb818e572a456991e8ca1b03..daf0c00c12424bcd76cf9c3b2ea3f9c06acd9dd0 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -45,6 +45,14 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel) __ef100_rx_packet, __efx_rx_packet, channel); } +static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix) +{ + if (efx->type->rx_buf_hash_valid) + return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid, + ef100_rx_buf_hash_valid, + prefix); + return true; +} /* Maximum number of TCP segments we support for soft-TSO */ #define EFX_TSO_MAX_SEGS 100 diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c index db90d94e24c92525880cf3a38332fb38d7c20964..a6bae6a234bac5e9d3a32934f2ab119ef81e502a 100644 --- a/drivers/net/ethernet/sfc/falcon/ethtool.c +++ b/drivers/net/ethernet/sfc/falcon/ethtool.c @@ -957,7 +957,7 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev, switch (info->flow_type) { case TCP_V4_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case UDP_V4_FLOW: case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c index 332183280a459dc8ebbda51069ad910ca6c39b3f..fa1ade856b104e9ad32b52fda3ff48b8a77badf1 100644 --- a/drivers/net/ethernet/sfc/falcon/farch.c +++ b/drivers/net/ethernet/sfc/falcon/farch.c @@ -1049,10 +1049,10 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event) switch (rx_ev_hdr_type) { case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: flags |= EF4_RX_PKT_TCP; - /* fall through */ + fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: flags |= EF4_RX_PKT_CSUMMED; - /* fall through */ + fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: case FSE_AZ_RX_EV_HDR_TYPE_OTHER: break; @@ -1310,7 +1310,7 @@ int ef4_farch_ev_process(struct ef4_channel *channel, int budget) if (efx->type->handle_global_event && efx->type->handle_global_event(channel, &event)) break; - /* else fall through */ + fallthrough; default: 
netif_err(channel->efx, hw, channel->efx->net_dev, "channel %d unknown event type %d (data " @@ -1983,7 +1983,7 @@ ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec, EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT | EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT): is_full = true; - /* fall through */ + fallthrough; case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO | EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): { __be32 rhost, host1, host2; @@ -2034,7 +2034,7 @@ ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec, case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID: is_full = true; - /* fall through */ + fallthrough; case EF4_FILTER_MATCH_LOC_MAC: spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL : EF4_FARCH_FILTER_MAC_WILD); @@ -2081,7 +2081,7 @@ ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec, case EF4_FARCH_FILTER_TCP_FULL: case EF4_FARCH_FILTER_UDP_FULL: is_full = true; - /* fall through */ + fallthrough; case EF4_FARCH_FILTER_TCP_WILD: case EF4_FARCH_FILTER_UDP_WILD: { __be32 host1, host2; @@ -2125,7 +2125,7 @@ ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec, case EF4_FARCH_FILTER_MAC_FULL: is_full = true; - /* fall through */ + fallthrough; case EF4_FARCH_FILTER_MAC_WILD: gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC; if (is_full) diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index d07eeaad9bdffdefb2093e3f2929c66c84b469b2..4002f9a3ae9096c519cf13bc1a76982e00e678a3 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1038,10 +1038,10 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) switch (rx_ev_hdr_type) { case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: flags |= EFX_RX_PKT_TCP; - /* fall through */ + fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: flags |= EFX_RX_PKT_CSUMMED; - /* fall through */ + fallthrough; case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: case FSE_AZ_RX_EV_HDR_TYPE_OTHER: break; @@ -1316,7 +1316,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget) if (efx->type->handle_global_event && efx->type->handle_global_event(channel, &event)) break; - /* else fall through */ + fallthrough; default: netif_err(channel->efx, hw, channel->efx->net_dev, "channel %d unknown event type %d (data " @@ -2043,7 +2043,7 @@ efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): is_full = true; - /* fall through */ + fallthrough; case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { __be32 rhost, host1, host2; @@ -2094,7 +2094,7 @@ efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: is_full = true; - /* fall through */ + fallthrough; case EFX_FILTER_MATCH_LOC_MAC: spec->type = (is_full ? 
EFX_FARCH_FILTER_MAC_FULL : EFX_FARCH_FILTER_MAC_WILD); @@ -2141,7 +2141,7 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, case EFX_FARCH_FILTER_TCP_FULL: case EFX_FARCH_FILTER_UDP_FULL: is_full = true; - /* fall through */ + fallthrough; case EFX_FARCH_FILTER_TCP_WILD: case EFX_FARCH_FILTER_UDP_WILD: { __be32 host1, host2; @@ -2185,7 +2185,7 @@ efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, case EFX_FARCH_FILTER_MAC_FULL: is_full = true; - /* fall through */ + fallthrough; case EFX_FARCH_FILTER_MAC_WILD: gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; if (is_full) diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c index 5a74d880b733839b7a19019018a573fa50781dce..1523be77b9db6c509e0492e23c60b52f5409de0b 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.c +++ b/drivers/net/ethernet/sfc/mcdi_filters.c @@ -140,7 +140,7 @@ efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx, switch (encap_type & EFX_ENCAP_TYPES_MASK) { case EFX_ENCAP_TYPE_VXLAN: vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN; - /* fallthrough */ + fallthrough; case EFX_ENCAP_TYPE_GENEVE: COPY_VALUE(ether_type, ETHER_TYPE); outer_ip_proto = IPPROTO_UDP; diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c index 56af8b54a864f28e3d17198fd0daee9b0d109c51..714d7f93721225f58135927984acb631d83c58af 100644 --- a/drivers/net/ethernet/sfc/mcdi_port_common.c +++ b/drivers/net/ethernet/sfc/mcdi_port_common.c @@ -282,7 +282,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx, break; default: WARN_ON(1); - /* Fall through */ + fallthrough; case MC_CMD_FCNTL_OFF: link_state->fc = 0; break; diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 7bb7ecb480ae1fc3102ab11c5380f2aec04844fd..062462a13847564ad2bfba3bcdb5bd6d4391b724 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -846,6 +846,7 @@ struct efx_async_filter_insertion { * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds * @timer_max_ns: Interrupt timer maximum value, in nanoseconds * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues + * @irqs_hooked: Channel interrupts are hooked * @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues * @irq_rx_moderation_us: IRQ moderation time for RX event queues * @msg_enable: Log message enable flags @@ -1004,6 +1005,7 @@ struct efx_nic { unsigned int timer_quantum_ns; unsigned int timer_max_ns; bool irq_rx_adaptive; + bool irqs_hooked; unsigned int irq_mod_step_us; unsigned int irq_rx_moderation_us; u32 msg_enable; @@ -1265,6 +1267,7 @@ struct efx_udp_tunnel { * @rx_write: Write RX descriptors and doorbell * @rx_defer_refill: Generate a refill reminder event * @rx_packet: Receive the queued RX buffer on a channel + * @rx_buf_hash_valid: Determine whether the RX prefix contains a valid hash * @ev_probe: Allocate resources for event queue * @ev_init: Initialise event queue on the NIC * @ev_fini: Deinitialise event queue on the NIC @@ -1409,6 +1412,7 @@ struct efx_nic_type { void (*rx_write)(struct efx_rx_queue *rx_queue); void (*rx_defer_refill)(struct efx_rx_queue *rx_queue); void (*rx_packet)(struct efx_channel *channel); + bool (*rx_buf_hash_valid)(const u8 *prefix); int (*ev_probe)(struct efx_channel *channel); int (*ev_init)(struct efx_channel *channel); void (*ev_fini)(struct efx_channel *channel); diff --git a/drivers/net/ethernet/sfc/nic.c 
b/drivers/net/ethernet/sfc/nic.c index d994d136bb0380c0d098ff82bc868f2069ce852e..d1e908846f5dd9347adae576456dee70caa6820a 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -129,6 +129,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx) #endif } + efx->irqs_hooked = true; return 0; fail2: @@ -154,6 +155,8 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) efx->net_dev->rx_cpu_rmap = NULL; #endif + if (!efx->irqs_hooked) + return; if (EFX_INT_MODE_USE_MSI(efx)) { /* Disable MSI/MSI-X interrupts */ efx_for_each_channel(channel, efx) @@ -163,6 +166,7 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) /* Disable legacy interrupt */ free_irq(efx->legacy_irq, efx); } + efx->irqs_hooked = false; } /* Register dump */ diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 59a43d586967fde0481645b02b56c18a62fd467d..aaa112877561ffdeaf5bd7c0661be7e1232cd969 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -358,7 +358,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, case XDP_ABORTED: trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); - /* Fall through */ + fallthrough; case XDP_DROP: efx_free_rx_buffers(rx_queue, rx_buf, 1); channel->n_rx_xdp_drops++; diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index fb77c7bbe4af1db0fb24817d0149dc75171b5797..5e29284c89c982eccda9bc2e321cc3ebdadf38c9 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -525,7 +525,8 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, return; } - if (efx->net_dev->features & NETIF_F_RXHASH) + if (efx->net_dev->features & NETIF_F_RXHASH && + efx_rx_buf_hash_valid(efx, eh)) skb_set_hash(skb, efx_rx_buf_hash(efx, eh), PKT_HASH_TYPE_L3); if (csum) { @@ -848,6 +849,7 @@ void efx_remove_filters(struct efx_nic *efx) efx_for_each_channel(channel, efx) { cancel_delayed_work_sync(&channel->filter_work); kfree(channel->rps_flow_id); + channel->rps_flow_id = NULL; } #endif down_write(&efx->filter_sem); diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 336105f7731312ba44b653e17eddb3a097015849..cfa460c7db2316e325cb2ca736aab982a7085aee 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -2228,7 +2228,7 @@ static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = sis_priv->mii->phy_addr; - /* Fall Through */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. 
*/ data->val_out = mdio_read(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 186c0bddbe5fec665d5d9ca0d1a03e942240c8d0..01069dfaf75c9c156d142fa19efb014820e12b37 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -712,7 +712,7 @@ static void smc911x_phy_detect(struct net_device *dev) /* Found an external PHY */ break; } - /* Else, fall through */ + fallthrough; default: /* Internal media only */ SMC_GET_PHY_ID1(lp, 1, id1); diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 25db667fa879c1f13c03d36af316264cb68d49ed..806eb651cea30fa57cd7b8e8f8857f76edc2363c 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -919,10 +919,10 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog, break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(priv->ndev, prog, act); - /* fall through -- handle aborts by dropping packet */ + fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: ret = NETSEC_XDP_CONSUMED; page = virt_to_head_page(xdp->data); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index d0d2d0fc5f0a9a78da74c9f4c042b3de631fdd29..08c76636c1641327b2e257a33db577d6f71486d1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -84,9 +84,10 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev) return ERR_PTR(err); switch (phy_mode) { - case PHY_INTERFACE_MODE_RGMII: /* Fall through */ - case PHY_INTERFACE_MODE_RGMII_ID /* Fall through */: - case PHY_INTERFACE_MODE_RGMII_RXID: /* Fall through */ + case PHY_INTERFACE_MODE_RGMII: + fallthrough; + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII; break; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index e113b1376fddac7056c804143f6e2390e5f3343b..bf195adee393fa762f9579f9fbfabc24765f115d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -1985,7 +1985,7 @@ void stmmac_selftest_run(struct net_device *dev, ret = phy_loopback(dev->phydev, true); if (!ret) break; - /* Fallthrough */ + fallthrough; case STMMAC_LOOPBACK_MAC: ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true); break; @@ -2018,7 +2018,7 @@ void stmmac_selftest_run(struct net_device *dev, ret = phy_loopback(dev->phydev, false); if (!ret) break; - /* Fallthrough */ + fallthrough; case STMMAC_LOOPBACK_MAC: stmmac_set_mac_loopback(priv, priv->ioaddr, false); break; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 3d747846f4829be0373ed6a211f15607e213e1e2..cc27d660a81851b13883c0c4ae8720ed1f3766fe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -228,7 +228,7 @@ static int tc_setup_cls_u32(struct stmmac_priv *priv, switch (cls->command) { case TC_CLSU32_REPLACE_KNODE: tc_unfill_entry(priv, cls); - /* Fall through */ + fallthrough; case TC_CLSU32_NEW_KNODE: return tc_config_knode(priv, cls); case 
TC_CLSU32_DELETE_KNODE: diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index e2bc7a25f6d1fbf666f4be67794904e20fa64e0a..b624e177ec7152ce081936aab658b56cd110e01d 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4759,7 +4759,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = cp->phy_addr; - /* Fallthrough... */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ spin_lock_irqsave(&cp->lock, flags); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 9b5effb72657213f9f9e88b9acad9e91ceb7ee3d..68695d4afacd542a7d8d51d18da7a07918f72575 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -8835,7 +8835,7 @@ static int walk_phys(struct niu *np, struct niu_parent *parent) else goto unknown_vg_1g_port; - /* fallthru */ + fallthrough; case 0x22: val = (phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_10G, 1) | @@ -8860,7 +8860,7 @@ static int walk_phys(struct niu *np, struct niu_parent *parent) else goto unknown_vg_1g_port; - /* fallthru */ + fallthrough; case 0x13: if ((lowest_10g & 0x7) == 0) val = (phy_encode(PORT_TYPE_10G, 0) | diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index eeb8518c8a84d43421a4245e56f486f686339c02..8deb943ca5de0ade288b03fce2db6f3627991332 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2712,7 +2712,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = gp->mii_phy_addr; - /* Fallthrough... */ + fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f, diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index cb994f66c3be2328d134a5683e232923509ab636..9baf3f3da91e1ca137637b72328f0a8554782dde 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -174,6 +174,8 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev) if (phy->speed == 10 && phy_interface_is_rgmii(phy)) /* Can be used with in band mode only */ mac_control |= CPSW_SL_CTL_EXT_EN; + if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII) + mac_control |= CPSW_SL_CTL_IFCTL_A; if (phy->duplex) mac_control |= CPSW_SL_CTL_FULLDUPLEX; diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 4e184eecc8e180c737c620c49523aca605526d6d..6e72ecbe5cf7e50420b8d097098373d1648d1760 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -67,7 +67,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv, dev_warn(priv->dev, "Unsupported PHY mode: \"%s\". Defaulting to MII.\n", phy_modes(phy_mode)); - /* fallthrough */ + fallthrough; case PHY_INTERFACE_MODE_MII: mode = AM33XX_GMII_SEL_MODE_MII; break; @@ -122,7 +122,7 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv, dev_warn(priv->dev, "Unsupported PHY mode: \"%s\". 
Defaulting to MII.\n", phy_modes(phy_mode)); - /* fallthrough */ + fallthrough; case PHY_INTERFACE_MODE_MII: mode = AM33XX_GMII_SEL_MODE_MII; break; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 9b17bbbe102fe419a208dc9741f26b6adfaf1fb4..4a65edc5a3759393215c419c3a4c72781d37bea0 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1116,7 +1116,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, HOST_PORT_NUM, ALE_VLAN, vid); ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 0, ALE_VLAN, vid); - ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid); + ret |= cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid); err: pm_runtime_put(cpsw->dev); return ret; diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 1247d35d42ef38f0409b0e7dace448094dd72c49..8ed78577cdedf61ef19044a5659f1747f191d2ed 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1032,19 +1032,34 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, return ret; } + /* reset the return code as pm_runtime_get_sync() can return + * non zero values as well. + */ + ret = 0; for (i = 0; i < cpsw->data.slaves; i++) { if (cpsw->slaves[i].ndev && - vid == cpsw->slaves[i].port_vlan) + vid == cpsw->slaves[i].port_vlan) { + ret = -EINVAL; goto err; + } } dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid); - cpsw_ale_del_vlan(cpsw->ale, vid, 0); - cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, - HOST_PORT_NUM, ALE_VLAN, vid); - cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, - 0, ALE_VLAN, vid); - cpsw_ale_flush_multicast(cpsw->ale, 0, vid); + ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0); + if (ret) + dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret); + ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, vid); + if (ret) + dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n", + ret); + ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, + 0, ALE_VLAN, vid); + if (ret) + dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n", + ret); + cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid); + ret = 0; err: pm_runtime_put(cpsw->dev); return ret; diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index d6d7a7d9c7ad07234458801ba0108787e7af0766..482a1a451e437169c140361f03b3a1843350dbdd 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -1371,10 +1371,10 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(ndev, prog, act); - /* fall through -- handle aborts by dropping packet */ + fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: goto drop; } diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 58623e974a0c0d1d915d440d6e4b3f5eb31f1bc6..76a342ea379727639f8ab02353a2b9fb23151161 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -948,7 +948,7 @@ static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCGMIIPHY: /* get address of MII PHY in use. */ data->phy_id = phy; - /* fall through */ + fallthrough; case SIOCGMIIREG: /* read MII PHY register. 
*/ diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 2db546b27ee009221a78201b351f9e4d560636cf..dc14a66583ff37680f85619762f5cf504b2c7f66 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -877,7 +877,7 @@ static int gelic_wl_set_auth(struct net_device *netdev, case IW_AUTH_KEY_MGMT: if (param->value & IW_AUTH_KEY_MGMT_PSK) break; - /* intentionally fall through */ + fallthrough; default: ret = -EOPNOTSUPP; break; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 07389702a5400ba5362d8854ff1881f99bbe16c4..5f5b33e6653b2b68f8b9b88a2a2881322b554f50 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -786,7 +786,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) /* fallthrough, if we release the descriptors * brutally (then we don't care about * SPIDER_NET_DESCR_CARDOWNED) */ - /* Fall through */ + fallthrough; case SPIDER_NET_DESCR_RESPONSE_ERROR: case SPIDER_NET_DESCR_PROTECTION_ERROR: @@ -1397,9 +1397,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, show_error = 0; break; - case SPIDER_NET_GDDDEN0INT: /* fallthrough */ - case SPIDER_NET_GDCDEN0INT: /* fallthrough */ - case SPIDER_NET_GDBDEN0INT: /* fallthrough */ + case SPIDER_NET_GDDDEN0INT: + case SPIDER_NET_GDCDEN0INT: + case SPIDER_NET_GDBDEN0INT: case SPIDER_NET_GDADEN0INT: /* someone has set RX_DMA_EN to 0 */ show_error = 0; @@ -1449,10 +1449,10 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, * Logging is not needed. */ show_error = 0; break; - case SPIDER_NET_GRFDFLLINT: /* fallthrough */ - case SPIDER_NET_GRFCFLLINT: /* fallthrough */ - case SPIDER_NET_GRFBFLLINT: /* fallthrough */ - case SPIDER_NET_GRFAFLLINT: /* fallthrough */ + case SPIDER_NET_GRFDFLLINT: + case SPIDER_NET_GRFCFLLINT: + case SPIDER_NET_GRFBFLLINT: + case SPIDER_NET_GRFAFLLINT: case SPIDER_NET_GRMFLLINT: /* Could happen when rx chain is full */ if (card->ignore_rx_ramfull == 0) { @@ -1473,9 +1473,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, break; /* chain end */ - case SPIDER_NET_GDDDCEINT: /* fallthrough */ - case SPIDER_NET_GDCDCEINT: /* fallthrough */ - case SPIDER_NET_GDBDCEINT: /* fallthrough */ + case SPIDER_NET_GDDDCEINT: + case SPIDER_NET_GDCDCEINT: + case SPIDER_NET_GDBDCEINT: case SPIDER_NET_GDADCEINT: spider_net_resync_head_ptr(card); spider_net_refill_rx_chain(card); @@ -1486,9 +1486,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, break; /* invalid descriptor */ - case SPIDER_NET_GDDINVDINT: /* fallthrough */ - case SPIDER_NET_GDCINVDINT: /* fallthrough */ - case SPIDER_NET_GDBINVDINT: /* fallthrough */ + case SPIDER_NET_GDDINVDINT: + case SPIDER_NET_GDCINVDINT: + case SPIDER_NET_GDBINVDINT: case SPIDER_NET_GDAINVDINT: /* Could happen when rx chain is full */ spider_net_resync_head_ptr(card); diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 3e3883ad88b080578ab450c992c0f9af4318ba98..3e337142b5161c1c097788b236fb05336ac55672 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1434,7 +1434,7 @@ do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get the address of the PHY in use. 
*/ data->phy_id = 0; /* we have only this address */ - /* fall through */ + fallthrough; case SIOCGMIIREG: /* Read the specified MII register. */ data->val_out = mii_rd(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f); diff --git a/drivers/net/fddi/skfp/cfm.c b/drivers/net/fddi/skfp/cfm.c index e9bf42996de831e7289282525899280cd544cb77..4eea3408034be09695d0735fe09a2f0d7780ec68 100644 --- a/drivers/net/fddi/skfp/cfm.c +++ b/drivers/net/fddi/skfp/cfm.c @@ -36,10 +36,6 @@ #define KERNEL #include "h/smtstate.h" -#ifndef lint -static const char ID_sccs[] = "@(#)cfm.c 2.18 98/10/06 (C) SK " ; -#endif - /* * FSM Macros */ @@ -208,7 +204,6 @@ void cfm(struct s_smc *smc, int event) { int state ; /* remember last state */ int cond ; - int oldstate ; /* We will do the following: */ /* - compute the variable WC_Flag for every port (This is where */ @@ -222,7 +217,6 @@ void cfm(struct s_smc *smc, int event) /* - change the portstates */ cem_priv_state (smc, event); - oldstate = smc->mib.fddiSMTCF_State ; do { DB_CFM("CFM : state %s%s event %s", smc->mib.fddiSMTCF_State & AFLAG ? "ACTIONS " : "", @@ -250,18 +244,11 @@ void cfm(struct s_smc *smc, int event) if (cond != smc->mib.fddiSMTPeerWrapFlag) smt_srf_event(smc,SMT_COND_SMT_PEER_WRAP,0,cond) ; -#if 0 /* - * Don't send ever MAC_PATH_CHANGE events. Our MAC is hard-wired + * Don't ever send MAC_PATH_CHANGE events. Our MAC is hard-wired * to the primary path. */ - /* - * path change - */ - if (smc->mib.fddiSMTCF_State != oldstate) { - smt_srf_event(smc,SMT_EVENT_MAC_PATH_CHANGE,INDEX_MAC,0) ; - } -#endif + #endif /* no SLIM_SMT */ /* diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c index 02966d141948faffe79134d93325cf66e530345f..4cbb145c74abbf2dd2421a6c1c1fbc5844bf426a 100644 --- a/drivers/net/fddi/skfp/fplustm.c +++ b/drivers/net/fddi/skfp/fplustm.c @@ -21,10 +21,6 @@ #include <linux/bitrev.h> #include <linux/etherdevice.h> -#ifndef lint -static const char ID_sccs[] = "@(#)fplustm.c 1.32 99/02/23 (C) SK " ; -#endif - #ifndef UNUSED #ifdef lint #define UNUSED(x) (x) = (x) diff --git a/drivers/net/fddi/skfp/hwmtm.c b/drivers/net/fddi/skfp/hwmtm.c index 3412e0fb0ac4b4c5f4b4e71ec3a1c07bb7ff11f2..1070390565114738485741fba8b71b8959716e3c 100644 --- a/drivers/net/fddi/skfp/hwmtm.c +++ b/drivers/net/fddi/skfp/hwmtm.c @@ -10,10 +10,6 @@ * ******************************************************************************/ -#ifndef lint -static char const ID_sccs[] = "@(#)hwmtm.c 1.40 99/05/31 (C) SK" ; -#endif - #define HWMTM #ifndef FDDI diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c index 1be039579d7090b3e22a27624e8c135a9358ff1a..554cde8d6073e61a4857b7b1b6a74ef79a63b9a2 100644 --- a/drivers/net/fddi/skfp/pcmplc.c +++ b/drivers/net/fddi/skfp/pcmplc.c @@ -847,7 +847,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd) case ACTIONS(PC5_SIGNAL) : ACTIONS_DONE() ; - /* fall through */ + fallthrough; case PC5_SIGNAL : if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT)) break ; @@ -946,7 +946,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd) SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ; ACTIONS_DONE() ; cmd = 0 ; - /* fall thru */ + fallthrough; case PC6_JOIN : switch (plc->p_state) { case PS_ACTIVE: diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c index b8c59d803ce68fc9bc3d81ee965cd3de5a76a021..774a6e3b0a67fbbae2bf4c7281727b0d6f6b4578 100644 --- a/drivers/net/fddi/skfp/smt.c +++ b/drivers/net/fddi/skfp/smt.c @@ -20,10 +20,6 @@ #define KERNEL #include "h/smtstate.h" -#ifndef lint
-static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ; -#endif - /* * FC in SMbuf */ @@ -1561,7 +1557,7 @@ u_long smt_get_tid(struct s_smc *smc) return tid & 0x3fffffffL; } - +#ifdef LITTLE_ENDIAN /* * table of parameter lengths */ @@ -1641,6 +1637,7 @@ static const struct smt_pdef { } ; #define N_SMT_PLEN ARRAY_SIZE(smt_pdef) +#endif int smt_check_para(struct s_smc *smc, struct smt_header *sm, const u_short list[]) diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 8c810edece866dcf2aac96bb57f690cce3ad7bf5..466622664424dcd14085257b76c32790f8eff6e6 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -974,7 +974,7 @@ static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid) FJES_RX_STOP_REQ_DONE; spin_unlock_irqrestore(&hw->rx_status_lock, flags); clear_bit(src_epid, &hw->txrx_stop_req_bit); - /* fall through */ + fallthrough; case EP_PARTNER_UNSHARE: case EP_PARTNER_COMPLETE: default: diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 21640a035d7df30de40da0b14a7e6abd309550ef..8e47d0112e5dc3798a4ffbf0e345f5b35419d319 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1179,6 +1179,7 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, goto nlmsg_failure; if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || + nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) goto nla_put_failure; diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 4476491b58f9d3270467528f0d258247d273c52c..e4e4981ac1d297cdb41902da306954d7bb05d167 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -500,7 +500,7 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat) } break; } - /* fall through */ + fallthrough; default: if (bc->hdlctx.calibrate <= 0) diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index deef14215110494783720b94908b432537ee664a..17be2bb2985cd00ab965f072762c1ff7e3f6da93 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -482,7 +482,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) case CRC_MODE_SMACK_TEST: ax->crcmode = CRC_MODE_FLEX_TEST; printk(KERN_INFO "mkiss: %s: Trying crc-smack\n", ax->dev->name); - // fall through + fallthrough; case CRC_MODE_SMACK: *p |= 0x80; crc = swab16(crc16(0, p, len)); @@ -491,7 +491,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) case CRC_MODE_FLEX_TEST: ax->crcmode = CRC_MODE_NONE; printk(KERN_INFO "mkiss: %s: Trying crc-flexnet\n", ax->dev->name); - // fall through + fallthrough; case CRC_MODE_FLEX: *p |= 0x20; crc = calc_crc_flex(p, len); @@ -744,7 +744,6 @@ static int mkiss_open(struct tty_struct *tty) ax->dev->name); break; case 0: - /* fall through */ default: crc_force = 0; printk(KERN_INFO "mkiss: %s: crc mode is auto.\n", diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 787f17e2a971680dfd06ec5e148ba08670fbd458..64b0a74c1523142ec5124338105e9e4187ef3c5f 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -367,7 +367,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, } rcu_read_unlock(); - while (unlikely(txq >= ndev->real_num_tx_queues)) + while (txq >= ndev->real_num_tx_queues) txq -= 
ndev->real_num_tx_queues; return txq; @@ -502,7 +502,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev, int rc; skb->dev = vf_netdev; - skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; + skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); rc = dev_queue_xmit(skb); if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) { diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 15e87c097b0b363d40928517b139d248f2d0fb5f..5bca94c990061820b69d7c5d1916daa3eb85f68e 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -106,12 +106,21 @@ static void ipvlan_port_destroy(struct net_device *dev) kfree(port); } +#define IPVLAN_ALWAYS_ON_OFLOADS \ + (NETIF_F_SG | NETIF_F_HW_CSUM | \ + NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL) + +#define IPVLAN_ALWAYS_ON \ + (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED) + #define IPVLAN_FEATURES \ - (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \ NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */ + #define IPVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) @@ -125,7 +134,9 @@ static int ipvlan_init(struct net_device *dev) dev->state = (dev->state & ~IPVLAN_STATE_MASK) | (phy_dev->state & IPVLAN_STATE_MASK); dev->features = phy_dev->features & IPVLAN_FEATURES; - dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED; + dev->features |= IPVLAN_ALWAYS_ON; + dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES; + dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS; dev->hw_enc_features |= dev->features; dev->gso_max_size = phy_dev->gso_max_size; dev->gso_max_segs = phy_dev->gso_max_segs; @@ -227,7 +238,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev, { struct ipvl_dev *ipvlan = netdev_priv(dev); - return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES); + features |= NETIF_F_ALL_FOR_ALL; + features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES); + features = netdev_increment_features(ipvlan->phy_dev->features, + features, features); + features |= IPVLAN_ALWAYS_ON; + features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON); + + return features; } static void ipvlan_change_rx_flags(struct net_device *dev, int change) @@ -734,10 +752,9 @@ static int ipvlan_device_event(struct notifier_block *unused, case NETDEV_FEAT_CHANGE: list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - ipvlan->dev->features = dev->features & IPVLAN_FEATURES; ipvlan->dev->gso_max_size = dev->gso_max_size; ipvlan->dev->gso_max_segs = dev->gso_max_segs; - netdev_features_change(ipvlan->dev); + netdev_update_features(ipvlan->dev); } break; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 4942f6112e51f838057974c2e386ba593f939cba..c8d803d3616c90a74c05674a72c283b8062f4067 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -842,7 +842,7 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCSHWTSTAMP: if (!net_eq(dev_net(dev), &init_net)) break; - /* fall through */ + fallthrough; case SIOCGHWTSTAMP: if (netif_device_present(real_dev) && ops->ndo_do_ioctl) err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd); @@ -1269,6 +1269,9 @@ static void macvlan_port_destroy(struct 
net_device *dev) static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + struct nlattr *nla, *head; + int rem, len; + if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; @@ -1316,6 +1319,20 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], return -EADDRNOTAVAIL; } + if (data[IFLA_MACVLAN_MACADDR_DATA]) { + head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]); + len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); + + nla_for_each_attr(nla, head, len, rem) { + if (nla_type(nla) != IFLA_MACVLAN_MACADDR || + nla_len(nla) != ETH_ALEN) + return -EINVAL; + + if (!is_valid_ether_addr(nla_data(nla))) + return -EADDRNOTAVAIL; + } + } + if (data[IFLA_MACVLAN_MACADDR_COUNT]) return -EINVAL; @@ -1372,10 +1389,6 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode, len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); nla_for_each_attr(nla, head, len, rem) { - if (nla_type(nla) != IFLA_MACVLAN_MACADDR || - nla_len(nla) != ETH_ALEN) - continue; - addr = nla_data(nla); ret = macvlan_hash_add_source(vlan, addr); if (ret) diff --git a/drivers/net/mii.c b/drivers/net/mii.c index 44612122338bba29be8a0160169c877b237668af..f6a97c859f3a4b47ce4cec58b2e5bed64567615f 100644 --- a/drivers/net/mii.c +++ b/drivers/net/mii.c @@ -597,7 +597,7 @@ int generic_mii_ioctl(struct mii_if_info *mii_if, switch(cmd) { case SIOCGMIIPHY: mii_data->phy_id = mii_if->phy_id; - /* fall through */ + fallthrough; case SIOCGMIIREG: mii_data->val_out = diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c index 7971dc4f54f15fa20ad0ed579590872062f7dce6..0e9511661601a66540fef0562694cac19cdb9bb2 100644 --- a/drivers/net/netdevsim/bus.c +++ b/drivers/net/netdevsim/bus.c @@ -193,7 +193,7 @@ new_device_store(struct bus_type *bus, const char *buf, size_t count) switch (err) { case 1: port_count = 1; - /* fall through */ + fallthrough; case 2: if (id > INT_MAX) { pr_err("Value of \"id\" is too big.\n"); diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index f32d56ac3e80faaa9e30b6b6aea8cc1b49a6f947..deea17a0e79c9ee55cbd27c76881d0e1fdc05bfd 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -760,14 +760,14 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, spin_lock_bh(&data->fib_lock); switch (event) { - case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_ADD: case FIB_EVENT_RULE_DEL: err = nsim_fib_rule_event(data, info, event == FIB_EVENT_RULE_ADD); break; - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ - case FIB_EVENT_ENTRY_APPEND: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: + case FIB_EVENT_ENTRY_APPEND: case FIB_EVENT_ENTRY_DEL: err = nsim_fib_event(data, info, event); break; diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c index 7471a8b908735ceee56422f552d551957f2f86cc..307f0ac1287bed062acab03757e68ee5dbac5740 100644 --- a/drivers/net/phy/adin.c +++ b/drivers/net/phy/adin.c @@ -366,10 +366,10 @@ static int adin_set_edpd(struct phy_device *phydev, u16 tx_interval) switch (tx_interval) { case 1000: /* 1 second */ - /* fallthrough */ + fallthrough; case ETHTOOL_PHY_EDPD_DFLT_TX_MSECS: val |= ADIN1300_NRG_PD_TX_EN; - /* fallthrough */ + fallthrough; case ETHTOOL_PHY_EDPD_NO_TX: break; default: diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 50fb7d16b75ac9a49ddef1d31f7fa42f3dd15318..79e67f2fe00ae07c17b06db5c6cdec3f6f29e2a8 100644 --- a/drivers/net/phy/dp83640.c +++ 
b/drivers/net/phy/dp83640.c @@ -766,13 +766,13 @@ static int decode_evnt(struct dp83640_private *dp83640, switch (words) { case 3: dp83640->edata.sec_hi = phy_txts->sec_hi; - /* fall through */ + fallthrough; case 2: dp83640->edata.sec_lo = phy_txts->sec_lo; - /* fall through */ + fallthrough; case 1: dp83640->edata.ns_hi = phy_txts->ns_hi; - /* fall through */ + fallthrough; case 0: dp83640->edata.ns_lo = phy_txts->ns_lo; } @@ -1409,7 +1409,7 @@ static void dp83640_txtstamp(struct mii_timestamper *mii_ts, kfree_skb(skb); return; } - /* fall through */ + fallthrough; case HWTSTAMP_TX_ON: skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index f3c04981b8da67846eb9c0e5b20208741aecd3cc..cd7032628a28ceeec4d55ba4594269b3e09d5e84 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -215,9 +215,9 @@ static int dp83867_set_wol(struct phy_device *phydev, if (wol->wolopts & WAKE_MAGICSECURE) { phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1, (wol->sopass[1] << 8) | wol->sopass[0]); - phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1, + phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP2, (wol->sopass[3] << 8) | wol->sopass[2]); - phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1, + phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP3, (wol->sopass[5] << 8) | wol->sopass[4]); val_rxcfg |= DP83867_WOL_SEC_EN; diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c index 58103152c6010e844589d81785ac0b7727b25ea7..6b98d74b5102bc98c524e790f8c0e60f2e75508e 100644 --- a/drivers/net/phy/dp83869.c +++ b/drivers/net/phy/dp83869.c @@ -427,18 +427,18 @@ static int dp83869_config_init(struct phy_device *phydev) return ret; val = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIICTL); - val &= ~(DP83869_RGMII_TX_CLK_DELAY_EN | - DP83869_RGMII_RX_CLK_DELAY_EN); + val |= (DP83869_RGMII_TX_CLK_DELAY_EN | + DP83869_RGMII_RX_CLK_DELAY_EN); if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) - val |= (DP83869_RGMII_TX_CLK_DELAY_EN | - DP83869_RGMII_RX_CLK_DELAY_EN); + val &= ~(DP83869_RGMII_TX_CLK_DELAY_EN | + DP83869_RGMII_RX_CLK_DELAY_EN); if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) - val |= DP83869_RGMII_TX_CLK_DELAY_EN; + val &= ~DP83869_RGMII_TX_CLK_DELAY_EN; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) - val |= DP83869_RGMII_RX_CLK_DELAY_EN; + val &= ~DP83869_RGMII_RX_CLK_DELAY_EN; ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIICTL, val); diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index c4641b1704d614862e0383d2f2186b318b57f875..18d81f43f2a889be11095d4613dc720d34d0ab6c 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -279,13 +279,13 @@ static struct phy_device *__fixed_phy_register(unsigned int irq, phy->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phy->supported); - /* fall through */ + fallthrough; case SPEED_100: linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, phy->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, phy->supported); - /* fall through */ + fallthrough; case SPEED_10: default: linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index a4fbf3a4fa97398eb3e5a51b02817a77b7b81ef5..6bc7406a1ce7311673cd81d32b764cccf43a1199 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -1738,13 +1738,13 
@@ static int __phy_write_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb, return 0; } -/* Trigger a read to the spcified MCB */ +/* Trigger a read to the specified MCB */ static int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb) { return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ); } -/* Trigger a write to the spcified MCB */ +/* Trigger a write to the specified MCB */ static int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb) { return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_WRITE); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 79b4f35d151ecc58c551548ee2306499c911bd63..735a806045ac313d4ab17cd1e63819614ee4056b 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -355,7 +355,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: mii_data->phy_id = phydev->mdio.addr; - /* fall through */ + fallthrough; case SIOCGMIIREG: if (mdio_phy_id_is_c45(mii_data->phy_id)) { @@ -433,7 +433,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) case SIOCSHWTSTAMP: if (phydev->mii_ts && phydev->mii_ts->hwtstamp) return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr); - /* fall through */ + fallthrough; default: return -EOPNOTSUPP; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 57d44648c8dd1cbb5e54487d3d2497bbbf4d5612..8adfbad0a1e8f08a3ebd0c42e97e2e34ec6dc5b7 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1979,7 +1979,7 @@ static int genphy_setup_master_slave(struct phy_device *phydev) break; case MASTER_SLAVE_CFG_MASTER_FORCE: ctl |= CTL1000_AS_MASTER; - /* fallthrough */ + fallthrough; case MASTER_SLAVE_CFG_SLAVE_FORCE: ctl |= CTL1000_ENABLE_MASTER; break; diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 32b4bd6a5b55910e066069d4556d44c4307794da..32f4e8ec96cff23deba27167dc3d9ed1fdfaeb5c 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1905,7 +1905,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: mii->phy_id = pl->phydev->mdio.addr; - /* fall through */ + fallthrough; case SIOCGMIIREG: ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); @@ -1928,7 +1928,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: mii->phy_id = 0; - /* fall through */ + fallthrough; case SIOCGMIIREG: ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 6900c68260e0dca7d54a4961389dc65c1f90415f..58014feedf6c882b826a103975bca0c09406383a 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -149,7 +149,7 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, port = PORT_TP; break; } - /* fallthrough */ + fallthrough; case SFF8024_CONNECTOR_SG: /* guess */ case SFF8024_CONNECTOR_HSSDC_II: case SFF8024_CONNECTOR_NOSEPARATE: @@ -301,7 +301,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, break; case SFF8024_ECC_100GBASE_CR4: phylink_set(modes, 100000baseCR4_Full); - /* fallthrough */ + fallthrough; case SFF8024_ECC_25GBASE_CR_S: case SFF8024_ECC_25GBASE_CR_N: phylink_set(modes, 25000baseCR_Full); diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index c24b0e83dd3291faccae72d0348325bae845736b..cf83314c8591eead4c4a60ac9ec86dae99393dc7 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c 
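Most of the hunks above and below this point are a mechanical conversion from /* fall through */ style comment markers to the fallthrough pseudo-keyword, which the compiler (via -Wimplicit-fallthrough) can actually verify, unlike a comment. The following is a minimal freestanding sketch of the idiom, assuming the compiler_attributes.h-style definition of the macro; the switch body is an invented example, not code from this series:

#ifndef __has_attribute
# define __has_attribute(x) 0
#endif

#if __has_attribute(__fallthrough__)
# define fallthrough __attribute__((__fallthrough__))
#else
# define fallthrough do {} while (0)	/* fallthrough */
#endif

#include <stdio.h>

static int advertise_mask(int speed)
{
	int mask = 0;

	switch (speed) {
	case 1000:
		mask |= 0x4;	/* 1G */
		fallthrough;	/* explicit, compiler-checkable drop-through */
	case 100:
		mask |= 0x2;	/* 100M */
		fallthrough;
	case 10:
		mask |= 0x1;	/* 10M */
		break;
	default:
		return 0;	/* a silent drop-through here would now warn */
	}
	return mask;
}

int main(void)
{
	printf("%#x\n", advertise_mask(1000));	/* prints 0x7 */
	return 0;
}

Because the keyword is a statement rather than a comment, it survives code reformatting and is rejected by the compiler if it appears anywhere other than immediately before a case label, which is what makes the conversion in these hunks more than cosmetic.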
@@ -552,7 +552,7 @@ static umode_t sfp_hwmon_is_visible(const void *data, case hwmon_temp_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_temp_input: case hwmon_temp_label: return 0444; @@ -571,7 +571,7 @@ static umode_t sfp_hwmon_is_visible(const void *data, case hwmon_in_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_in_input: case hwmon_in_label: return 0444; @@ -590,7 +590,7 @@ static umode_t sfp_hwmon_is_visible(const void *data, case hwmon_curr_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_curr_input: case hwmon_curr_label: return 0444; @@ -618,7 +618,7 @@ static umode_t sfp_hwmon_is_visible(const void *data, case hwmon_power_crit: if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) return 0; - /* fall through */ + fallthrough; case hwmon_power_input: case hwmon_power_label: return 0444; @@ -1872,7 +1872,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event) dev_warn(sfp->dev, "hwmon probe failed: %d\n", err); sfp_sm_mod_next(sfp, SFP_MOD_WAITDEV, 0); - /* fall through */ + fallthrough; case SFP_MOD_WAITDEV: /* Ensure that the device is attached before proceeding */ if (sfp->sm_dev_state < SFP_DEV_DOWN) @@ -1890,7 +1890,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event) goto insert; sfp_sm_mod_next(sfp, SFP_MOD_HPOWER, 0); - /* fall through */ + fallthrough; case SFP_MOD_HPOWER: /* Enable high power mode */ err = sfp_sm_mod_hpower(sfp, true); diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index d82016dcde3b961b083c955b8037093c5b6538a8..4406b353123edadd372a7e522f25c38a631bb9fc 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -498,7 +498,7 @@ plip_receive(unsigned short nibble_timeout, struct net_device *dev, *data_p = (c0 >> 3) & 0x0f; write_data (dev, 0x10); /* send ACK */ *ns_p = PLIP_NB_1; - /* fall through */ + fallthrough; case PLIP_NB_1: cx = nibble_timeout; @@ -594,7 +594,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, printk(KERN_DEBUG "%s: receive start\n", dev->name); rcv->state = PLIP_PK_LENGTH_LSB; rcv->nibble = PLIP_NB_BEGIN; - /* fall through */ + fallthrough; case PLIP_PK_LENGTH_LSB: if (snd->state != PLIP_PK_DONE) { @@ -615,7 +615,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, return TIMEOUT; } rcv->state = PLIP_PK_LENGTH_MSB; - /* fall through */ + fallthrough; case PLIP_PK_LENGTH_MSB: if (plip_receive(nibble_timeout, dev, @@ -638,7 +638,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, rcv->state = PLIP_PK_DATA; rcv->byte = 0; rcv->checksum = 0; - /* fall through */ + fallthrough; case PLIP_PK_DATA: lbuf = rcv->skb->data; @@ -651,7 +651,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, rcv->checksum += lbuf[--rcv->byte]; } while (rcv->byte); rcv->state = PLIP_PK_CHECKSUM; - /* fall through */ + fallthrough; case PLIP_PK_CHECKSUM: if (plip_receive(nibble_timeout, dev, @@ -664,7 +664,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, return ERROR; } rcv->state = PLIP_PK_DONE; - /* fall through */ + fallthrough; case PLIP_PK_DONE: /* Inform the upper layer for the arrival of a packet. 
*/ @@ -710,7 +710,7 @@ plip_send(unsigned short nibble_timeout, struct net_device *dev, case PLIP_NB_BEGIN: write_data (dev, data & 0x0f); *ns_p = PLIP_NB_1; - /* fall through */ + fallthrough; case PLIP_NB_1: write_data (dev, 0x10 | (data & 0x0f)); @@ -725,7 +725,7 @@ plip_send(unsigned short nibble_timeout, struct net_device *dev, } write_data (dev, 0x10 | (data >> 4)); *ns_p = PLIP_NB_2; - /* fall through */ + fallthrough; case PLIP_NB_2: write_data (dev, (data >> 4)); @@ -814,7 +814,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, &snd->nibble, snd->length.b.lsb)) return TIMEOUT; snd->state = PLIP_PK_LENGTH_MSB; - /* fall through */ + fallthrough; case PLIP_PK_LENGTH_MSB: if (plip_send(nibble_timeout, dev, @@ -823,7 +823,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, snd->state = PLIP_PK_DATA; snd->byte = 0; snd->checksum = 0; - /* fall through */ + fallthrough; case PLIP_PK_DATA: do { @@ -835,7 +835,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, snd->checksum += lbuf[--snd->byte]; } while (snd->byte); snd->state = PLIP_PK_CHECKSUM; - /* fall through */ + fallthrough; case PLIP_PK_CHECKSUM: if (plip_send(nibble_timeout, dev, @@ -846,7 +846,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, dev_kfree_skb(snd->skb); dev->stats.tx_packets++; snd->state = PLIP_PK_DONE; - /* fall through */ + fallthrough; case PLIP_PK_DONE: /* Close the connection */ @@ -935,7 +935,7 @@ plip_interrupt(void *dev_id) switch (nl->connection) { case PLIP_CN_CLOSING: netif_wake_queue (dev); - /* fall through */ + fallthrough; case PLIP_CN_NONE: case PLIP_CN_SEND: rcv->state = PLIP_PK_TRIGGER; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 3c11a77f5709f1b0218e44f2e5b1fd90256e0d73..7959b5c2d11f1cdcc4d549546abeff694415e8a1 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1590,10 +1590,10 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(tun->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: this_cpu_inc(tun->pcpu_stats->rx_dropped); break; @@ -2417,7 +2417,7 @@ static int tun_xdp_one(struct tun_struct *tun, switch (err) { case XDP_REDIRECT: *flush = true; - /* fall through */ + fallthrough; case XDP_TX: return 0; case XDP_PASS: diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index a7fbc3ccd29e422d231e2e8aab3b5d453ebb1e36..c7bcfca7d70b091a31a88113ad28a6ae97fc9e46 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -252,6 +252,7 @@ config USB_NET_CDC_EEM config USB_NET_CDC_NCM tristate "CDC NCM support" depends on USB_USBNET + select USB_NET_CDCETHER default y help This driver provides support for CDC NCM (Network Control Model diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 7e44110746dd00f11382b9a03d8c56e724e00c59..0717c18015c9c5a93c2dc8dc29d864addf6ea8fa 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -333,13 +333,13 @@ static void aqc111_set_phy_speed(struct usbnet *dev, u8 autoneg, u16 speed) switch (speed) { case SPEED_5000: aqc111_data->phy_cfg |= AQ_ADV_5G; - /* fall-through */ + fallthrough; case SPEED_2500: aqc111_data->phy_cfg |= AQ_ADV_2G5; - /* fall-through */ + fallthrough; case SPEED_1000: aqc111_data->phy_cfg |= AQ_ADV_1G; - /* fall-through */ + fallthrough; case SPEED_100: aqc111_data->phy_cfg |= AQ_ADV_100M; /* fall-through */ diff --git 
a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index e39f41efda3ec1ecac5616d509f183c99ea69a33..7bc6e8f856fe0bd0188d3cb3af772d6e44870d43 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -296,7 +296,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal) netdev_dbg(dev->net, "asix_get_phy_addr()\n"); - if (ret < 0) { + if (ret < 2) { netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret); goto out; } diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index d387bc7ac1b60b61969a8fa2eed848438438de59..97ba67042d126f07542f6e51f21d9378fa0ed3db 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -858,7 +858,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); - /* fall through */ + fallthrough; case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 9bdbd7b472a0ac622f843f43e90de8c11621ba88..dba847f280962d9feaa269d0e2cc012a7b5ff18b 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -97,7 +97,7 @@ static void tx_complete(struct urb *req) case -ECONNRESET: case -ESHUTDOWN: dev->stats.tx_aborted_errors++; - /* fall through */ + fallthrough; default: dev->stats.tx_errors++; dev_dbg(&dev->dev, "TX error (%d)\n", status); diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index b91f92e4e5f22d659d89c35d8accc6ef03191b74..915ac75b55fc7c43f55db19b379983216b93de5f 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -625,6 +625,10 @@ static const struct usb_device_id products[] = { USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */ .driver_info = (unsigned long)&dm9601_info, }, + { + USB_DEVICE(0x0586, 0x3427), /* ZyXEL Keenetic Plus DSL xDSL modem */ + .driver_info = (unsigned long)&dm9601_info, + }, {}, // END }; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 442507f25aadb7dca5bd84787a8d85a1a1b0822e..65b315bc60abd649f8743f3ae23d130fb27301f1 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3192,7 +3192,7 @@ static void rx_complete(struct urb *urb) case -EPIPE: dev->net->stats.rx_errors++; lan78xx_defer_kevent(dev, EVENT_RX_HALT); - /* FALLTHROUGH */ + fallthrough; case -ECONNRESET: /* async unlink */ case -ESHUTDOWN: /* hardware gone */ netif_dbg(dev, ifdown, dev->net, @@ -3213,7 +3213,7 @@ static void rx_complete(struct urb *urb) /* data overrun ... flush fifo? 
*/ case -EOVERFLOW: dev->net->stats.rx_over_errors++; - /* FALLTHROUGH */ + fallthrough; default: state = rx_cleanup; diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 0ef7e1f443e333ffd49cab9161ccb55543a7a20e..e92cb51a2c77028b8c2f05b923bcda8312313be6 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -629,7 +629,7 @@ static void write_bulk_callback(struct urb *urb) return; default: netif_info(pegasus, tx_err, net, "TX status %d\n", status); - /* FALL THROUGH */ + fallthrough; case 0: break; } @@ -1009,7 +1009,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd) switch (cmd) { case SIOCDEVPRIVATE: data[0] = pegasus->phy; - /* fall through */ + fallthrough; case SIOCDEVPRIVATE + 1: read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]); res = 0; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 2b02fefd094d53fc1ed2342625c8f479f5652cae..b1770489aca5194a3ce054962c4c7cab1fc69281 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1682,7 +1682,7 @@ static void intr_callback(struct urb *urb) case -ECONNRESET: /* unlink */ case -ESHUTDOWN: netif_device_detach(tp->netdev); - /* fall through */ + fallthrough; case -ENOENT: case -EPROTO: netif_info(tp, intr, tp->netdev, @@ -3251,7 +3251,7 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) r8152_mdio_write(tp, MII_BMCR, data); data = r8153_phy_status(tp, PHY_STAT_LAN_ON); - /* fall through */ + fallthrough; default: if (data != PHY_STAT_LAN_ON) @@ -4849,7 +4849,7 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, tp->ups_info.speed_duplex = NWAY_1000M_FULL; break; } - /* fall through */ + fallthrough; default: ret = -EINVAL; goto out; diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index e7c630d375899ba3f94176b053c0b11e864c1c3d..733f120c852b727224fe645ca0a5b368c7f6758d 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -843,7 +843,7 @@ static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCDEVPRIVATE: data[0] = dev->phy; - /* fall through */ + fallthrough; case SIOCDEVPRIVATE + 1: read_mii_word(dev, dev->phy, (data[1] & 0x1f), &data[3]); break; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index e45935a5856aa5425240a39657ba0355793bf951..2b2a841cd93884c9451ba9143821d9e66afff7f5 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -110,7 +110,7 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf) if (!usb_endpoint_dir_in(&e->desc)) continue; intr = 1; - /* FALLTHROUGH */ + fallthrough; case USB_ENDPOINT_XFER_BULK: break; default: @@ -628,7 +628,7 @@ static void rx_complete (struct urb *urb) /* data overrun ... flush fifo? 
*/ case -EOVERFLOW: dev->net->stats.rx_over_errors++; - // FALLTHROUGH + fallthrough; default: state = rx_cleanup; @@ -1530,7 +1530,7 @@ static void usbnet_bh (struct timer_list *t) continue; case tx_done: kfree(entry->urb->sg); - /* fall through */ + fallthrough; case rx_cleanup: usb_free_urb (entry->urb); dev_kfree_skb (skb); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index e56cd562a66455e43162274f34befb3d3ec066c6..a475f48d43c44d2b16f83c565615bf8289e488eb 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -610,10 +610,10 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: stats->xdp_drops++; goto err_xdp; @@ -745,10 +745,10 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: stats->xdp_drops++; goto xdp_drop; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 0ada48edf74955c633a572fd70fa6eedab85dba9..263b005981bd7837277e65280c2ff3df38f17730 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -724,7 +724,7 @@ static struct sk_buff *receive_small(struct net_device *dev, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(vi->dev, xdp_prog, act); case XDP_DROP: @@ -922,10 +922,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(vi->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: if (unlikely(xdp_page != page)) __free_pages(xdp_page, 0); diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index def27afa1c69fa2dffad1466894ccb5c00721725..1014693a5ceb232a8674de096676beaedbec250f 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -743,7 +743,7 @@ vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter, case ESP_V4_FLOW: if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + fallthrough; case SCTP_V4_FLOW: case IPV4_FLOW: info->data |= RXH_IP_SRC | RXH_IP_DST; diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 7bcee41905cfb4cf02e7e436b75be1900de74e32..3ca4daf633897bc0f4002935b76818f232a4427b 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -295,14 +295,13 @@ static int dlci_close(struct net_device *dev) { struct dlci_local *dlp; struct frad_local *flp; - int err; netif_stop_queue(dev); dlp = netdev_priv(dev); flp = netdev_priv(dlp->slave); - err = (*flp->deactivate)(dlp->slave, dev); + (*flp->deactivate)(dlp->slave, dev); return 0; } diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index dfc16770458d84b62abd66aee4bd4b02dfa97539..9b00708676cf7123e022445434aa8f9ebb0f9a05 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c @@ -229,7 +229,8 @@ static void hdlc_setup_dev(struct net_device *dev) dev->min_mtu = 68; dev->max_mtu = HDLC_MAX_MTU; dev->type = ARPHRD_RAWHDLC; - dev->hard_header_len = 16; + dev->hard_header_len = 0; + dev->needed_headroom = 0; dev->addr_len 
= 0; dev->header_ops = &hdlc_null_ops; } diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index d8cba3625c185d76b5fd47e97eeaa1ab4900065a..444130655d8ea845ee7395a344f0a57c98aabddf 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -370,6 +370,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) memcpy(&state(hdlc)->settings, &new_settings, size); spin_lock_init(&state(hdlc)->lock); dev->header_ops = &cisco_header_ops; + dev->hard_header_len = sizeof(struct hdlc_header); dev->type = ARPHRD_CISCO; call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); netif_dormant_on(dev); diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index f70336bb6f5244e3064a20a28f06dec5a0f9103f..f52b9fed05931d4b8fc273a6f2b0e5fe0ba2c3c4 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -107,8 +107,14 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) { int result; + /* There should be a pseudo header of 1 byte added by upper layers. + * Check to make sure it is there before reading it. + */ + if (skb->len < 1) { + kfree_skb(skb); + return NETDEV_TX_OK; + } - /* X.25 to LAPB */ switch (skb->data[0]) { case X25_IFACE_DATA: /* Data to be transmitted */ skb_pull(skb, 1); @@ -294,6 +300,15 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) return result; memcpy(&state(hdlc)->settings, &new_settings, size); + + /* There's no header_ops so hard_header_len should be 0. */ + dev->hard_header_len = 0; + /* When transmitting data: + * first we'll remove a pseudo header of 1 byte, + * then we'll prepend an LAPB header of at most 3 bytes. + */ + dev->needed_headroom = 3 - 1; + dev->type = ARPHRD_X25; call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); netif_dormant_off(dev); diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 1ea15f2123ed5cf7d2bbec3fba0a3eedf8129dfd..732a6c1851f56ce50cf9e6c1d9059212e49982cd 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -173,7 +173,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, case X25_IFACE_DISCONNECT: if ((err = lapb_disconnect_request(dev)) != LAPB_OK) pr_err("lapb_disconnect_request err: %d\n", err); - /* Fall thru */ + fallthrough; default: goto drop; } @@ -210,6 +210,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) skb->dev = dev = lapbeth->ethdev; + skb_reset_network_header(skb); + dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); dev_queue_xmit(skb); @@ -340,6 +342,7 @@ static int lapbeth_new_device(struct net_device *dev) */ ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len + dev->needed_headroom; + ndev->needed_tailroom = dev->needed_tailroom; lapbeth = netdev_priv(ndev); lapbeth->axdev = ndev; diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index 77ccf3672ede7db77071d0cba13ad49ba8f3e4b3..bc2c1c7fb1a45aa8cbc6fd39e592275efe88df39 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -413,7 +413,7 @@ static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int case SDLA_RET_NO_BUFS: if (cmd == SDLA_INFORMATION_WRITE) break; - /* Else, fall through */ + fallthrough; default: netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n", diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index de79844635958d000adce25ce617b10b94310a88..7ee980575208341b902d1e82285a26eb70e49d6b 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -330,7 
+330,7 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb, if (err != LAPB_OK) netdev_err(dev, "lapb_disconnect_request error: %d\n", err); - /* fall through */ + fallthrough; default: kfree_skb(skb); return NETDEV_TX_OK; diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c index 4fe7c7e132c496611ddd1410c169cf2886adbc8b..9afed3b133d3f231a14ee83afb4a4017feb0f4b7 100644 --- a/drivers/net/wimax/i2400m/control.c +++ b/drivers/net/wimax/i2400m/control.c @@ -352,7 +352,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m, case I2400M_SS_IDLE: d_printf(1, dev, "entering BS-negotiated idle mode\n"); - /* Fall through */ + fallthrough; case I2400M_SS_DISCONNECTING: case I2400M_SS_DATA_PATH_CONNECTED: wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED); diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c index 1f7709d24f3526a1498a9f5a2f85e79c71ab811f..27ab233650d538c6aff52a7d015c14a99b36a6d9 100644 --- a/drivers/net/wimax/i2400m/usb-fw.c +++ b/drivers/net/wimax/i2400m/usb-fw.c @@ -135,7 +135,7 @@ ssize_t i2400mu_tx_bulk_out(struct i2400mu *i2400mu, void *buf, size_t buf_size) msleep(10); /* give the device some time */ goto retry; } - /* fall through */ + fallthrough; case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... */ case -ENOENT: /* just ignore it */ diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c index 3a0e7226768ade6ee33081ee04d6f90de734c2a2..3ba9d70cca1b439bd530f1b04f8ea13debda3b5d 100644 --- a/drivers/net/wimax/i2400m/usb-tx.c +++ b/drivers/net/wimax/i2400m/usb-tx.c @@ -136,7 +136,7 @@ int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg, msleep(10); /* give the device some time */ goto retry; } - /* fall through */ + fallthrough; case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... */ case -ENOENT: /* just ignore it */ diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 9659f9e1aaa64c8a5a993fe17868013f86d94523..b684e97ac976806354ce538506a0d77efbbb9ce6 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -195,7 +195,7 @@ int __i2400mu_send_barker(struct i2400mu *i2400mu, msleep(10); /* give the device some time */ goto retry; } - /* fall through */ + fallthrough; case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... 
*/ case -ENOENT: /* just ignore it */ diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index 6b7532f7c936d9aee02814b443a12fd24c5c718f..ff96f22648efdeaa3bbac353f1b84c3883a01948 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -393,7 +393,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m) case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE: seq_puts(m, "Hash Algorithm: NONE\n"); - /* FALLTHRU */ + fallthrough; default: return; } diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 7e62a6ee76223bea8c8eccb2298fc659ba6af2ee..f1c1624cec8f5dffe6f57a3486187f9c4667dc5c 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -448,7 +448,7 @@ static void frontend_changed(struct xenbus_device *dev, set_backend_state(be, XenbusStateClosed); if (xenbus_dev_is_online(dev)) break; - /* fall through - if not online */ + fallthrough; /* if not online */ case XenbusStateUnknown: set_backend_state(be, XenbusStateClosed); device_unregister(&dev->dev); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 458be6882b98869c36c7a6cd2f0440c4404d0f24..3e9895bec15f08c2239aad442f7e5659c4cb23a8 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -2341,7 +2341,7 @@ static void netback_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* Fall through - Missed the backend's CLOSING state. */ + fallthrough; /* Missed the backend's CLOSING state */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c index 346e084387f7d125a48df07ade33ef0689f69239..f7464bd6d57cb2dbc9ef603f3a833ce8b3a344fb 100644 --- a/drivers/nfc/pn533/pn533.c +++ b/drivers/nfc/pn533/pn533.c @@ -2321,7 +2321,7 @@ static int pn533_transceive(struct nfc_dev *nfc_dev, break; } - /* fall through */ + fallthrough; default: /* jumbo frame ? 
*/ if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) { @@ -2448,7 +2448,7 @@ static void pn533_wq_mi_recv(struct work_struct *work) break; } - /* fall through */ + fallthrough; default: skb_put_u8(skb, 1); /*TG*/ diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c index 0b9ca6d20ffadc48e36d15185f2272f23f6024fd..8874d605b14f8d824ad36bc5e2035f2226e21ee4 100644 --- a/drivers/nfc/st21nfca/dep.c +++ b/drivers/nfc/st21nfca/dep.c @@ -611,7 +611,7 @@ static void st21nfca_im_recv_dep_res_cb(void *context, struct sk_buff *skb, switch (ST21NFCA_NFC_DEP_PFB_TYPE(dep_res->pfb)) { case ST21NFCA_NFC_DEP_PFB_ACK_NACK_PDU: pr_err("Received a ACK/NACK PDU\n"); - /* fall through */ + fallthrough; case ST21NFCA_NFC_DEP_PFB_I_PDU: info->dep_info.curr_nfc_dep_pni = ST21NFCA_NFC_DEP_PFB_PNI(dep_res->pfb + 1); diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index 9642971e89ceae99733f18bbef78911a578fcd67..4578547659839983dfbbe9491374b641a1d36880 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -966,7 +966,7 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev, rc = down_killable(&stcontext->exchange_lock); if (rc) { WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n"); - return rc; + goto free_skb_resp; } rc = st95hf_spi_send(&stcontext->spicontext, skb->data, diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c index e46adaac1c6350fd0d308f38fb3aa787b06a20b5..3bd97c73f983f2478dcaa3f06289a6d3fa269c03 100644 --- a/drivers/nfc/trf7970a.c +++ b/drivers/nfc/trf7970a.c @@ -1153,7 +1153,7 @@ static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on) dev_err(trf->dev, "%s - Invalid request: %d %d\n", __func__, trf->state, on); ret = -EINVAL; - /* FALLTHROUGH */ + fallthrough; case TRF7970A_ST_IDLE: case TRF7970A_ST_IDLE_RX_BLOCKED: case TRF7970A_ST_WAIT_FOR_RX_DATA: @@ -1960,7 +1960,7 @@ static void trf7970a_shutdown(struct trf7970a *trf) case TRF7970A_ST_WAIT_TO_ISSUE_EOF: case TRF7970A_ST_LISTENING: trf7970a_send_err_upstream(trf, -ECANCELED); - /* FALLTHROUGH */ + fallthrough; case TRF7970A_ST_IDLE: case TRF7970A_ST_IDLE_RX_BLOCKED: trf7970a_switch_rf_off(trf); diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index e6d1f5b298f34083217a66873b7acc283d1060c2..4a02561cfb965726c2fe2d73c9934f10adcbfb20 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -1483,7 +1483,7 @@ static void ntb_rx_copy_callback(void *data, case DMA_TRANS_READ_FAILED: case DMA_TRANS_WRITE_FAILED: entry->errors++; - /* fall through */ + fallthrough; case DMA_TRANS_ABORTED: { struct ntb_transport_qp *qp = entry->qp; @@ -1739,7 +1739,7 @@ static void ntb_tx_copy_callback(void *data, case DMA_TRANS_READ_FAILED: case DMA_TRANS_WRITE_FAILED: entry->errors++; - /* fall through */ + fallthrough; case DMA_TRANS_ABORTED: { void __iomem *offset = diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 61374def515552f40ef6af782096652cdfe2a69d..b59032e0859b7f3a3fa50731ee8efff9c1fc5c8a 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -529,6 +529,7 @@ static DEVICE_ATTR_ADMIN_RW(activate); static struct attribute *nvdimm_firmware_attributes[] = { &dev_attr_activate.attr, &dev_attr_result.attr, + NULL, }; static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 88cff309d8e4f06a18661f8c10f8ff318a835834..f3a61a24d45f5a78d296fcdf6920ad5025c7e1fb 100644 --- a/drivers/nvme/host/core.c 
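The dimm_devs.c hunk above adds a missing NULL sentinel to nvdimm_firmware_attributes[]. sysfs walks attribute arrays pointer by pointer until it reaches NULL, so an unterminated array sends the iterator past the end of the array into whatever memory follows. A freestanding sketch of that convention follows; the struct and names are simplified stand-ins, not the kernel's definitions:

#include <stddef.h>
#include <stdio.h>

struct attribute { const char *name; };

static struct attribute activate_attr = { "activate" };
static struct attribute result_attr   = { "result" };

static struct attribute *firmware_attrs[] = {
	&activate_attr,
	&result_attr,
	NULL,	/* the sentinel the patch restores */
};

int main(void)
{
	/* Mirrors how a sysfs group is iterated: stop at the NULL entry. */
	for (struct attribute **a = firmware_attrs; *a; a++)
		printf("attr: %s\n", (*a)->name);
	return 0;
}

Without the sentinel the loop condition *a reads an out-of-bounds pointer, which is exactly the class of bug the one-line "+ NULL," fix addresses.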
+++ b/drivers/nvme/host/core.c @@ -241,17 +241,6 @@ static blk_status_t nvme_error_status(u16 status) } } -static inline bool nvme_req_needs_retry(struct request *req) -{ - if (blk_noretry_request(req)) - return false; - if (nvme_req(req)->status & NVME_SC_DNR) - return false; - if (nvme_req(req)->retries >= nvme_max_retries) - return false; - return true; -} - static void nvme_retry_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; @@ -268,34 +257,67 @@ static void nvme_retry_req(struct request *req) blk_mq_delay_kick_requeue_list(req->q, delay); } -void nvme_complete_rq(struct request *req) +enum nvme_disposition { + COMPLETE, + RETRY, + FAILOVER, +}; + +static inline enum nvme_disposition nvme_decide_disposition(struct request *req) { - blk_status_t status = nvme_error_status(nvme_req(req)->status); + if (likely(nvme_req(req)->status == 0)) + return COMPLETE; - trace_nvme_complete_rq(req); + if (blk_noretry_request(req) || + (nvme_req(req)->status & NVME_SC_DNR) || + nvme_req(req)->retries >= nvme_max_retries) + return COMPLETE; - nvme_cleanup_cmd(req); + if (req->cmd_flags & REQ_NVME_MPATH) { + if (nvme_is_path_error(nvme_req(req)->status) || + blk_queue_dying(req->q)) + return FAILOVER; + } else { + if (blk_queue_dying(req->q)) + return COMPLETE; + } - if (nvme_req(req)->ctrl->kas) - nvme_req(req)->ctrl->comp_seen = true; + return RETRY; +} - if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) { - if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req)) - return; +static inline void nvme_end_req(struct request *req) +{ + blk_status_t status = nvme_error_status(nvme_req(req)->status); - if (!blk_queue_dying(req->q)) { - nvme_retry_req(req); - return; - } - } else if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && - req_op(req) == REQ_OP_ZONE_APPEND) { + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && + req_op(req) == REQ_OP_ZONE_APPEND) req->__sector = nvme_lba_to_sect(req->q->queuedata, le64_to_cpu(nvme_req(req)->result.u64)); - } nvme_trace_bio_complete(req, status); blk_mq_end_request(req, status); } + +void nvme_complete_rq(struct request *req) +{ + trace_nvme_complete_rq(req); + nvme_cleanup_cmd(req); + + if (nvme_req(req)->ctrl->kas) + nvme_req(req)->ctrl->comp_seen = true; + + switch (nvme_decide_disposition(req)) { + case COMPLETE: + nvme_end_req(req); + return; + case RETRY: + nvme_retry_req(req); + return; + case FAILOVER: + nvme_failover_req(req); + return; + } +} EXPORT_SYMBOL_GPL(nvme_complete_rq); bool nvme_cancel_request(struct request *req, void *data, bool reserved) @@ -330,7 +352,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: changed = true; - /* FALLTHRU */ + fallthrough; default: break; } @@ -340,7 +362,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, case NVME_CTRL_NEW: case NVME_CTRL_LIVE: changed = true; - /* FALLTHRU */ + fallthrough; default: break; } @@ -350,7 +372,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, case NVME_CTRL_NEW: case NVME_CTRL_RESETTING: changed = true; - /* FALLTHRU */ + fallthrough; default: break; } @@ -361,7 +383,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: changed = true; - /* FALLTHRU */ + fallthrough; default: break; } @@ -371,7 +393,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, case NVME_CTRL_DELETING: case NVME_CTRL_DEAD: changed = true; - /* FALLTHRU */ + fallthrough; default: break; } @@ -380,7 +402,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, 
switch (old_state) { case NVME_CTRL_DELETING: changed = true; - /* FALLTHRU */ + fallthrough; default: break; } @@ -2004,13 +2026,49 @@ static void nvme_update_disk_info(struct gendisk *disk, blk_mq_unfreeze_queue(disk->queue); } +static inline bool nvme_first_scan(struct gendisk *disk) +{ + /* nvme_alloc_ns() scans the disk prior to adding it */ + return !(disk->flags & GENHD_FL_UP); +} + +static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) +{ + struct nvme_ctrl *ctrl = ns->ctrl; + u32 iob; + + if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && + is_power_of_2(ctrl->max_hw_sectors)) + iob = ctrl->max_hw_sectors; + else + iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); + + if (!iob) + return; + + if (!is_power_of_2(iob)) { + if (nvme_first_scan(ns->disk)) + pr_warn("%s: ignoring unaligned IO boundary:%u\n", + ns->disk->disk_name, iob); + return; + } + + if (blk_queue_is_zoned(ns->disk->queue)) { + if (nvme_first_scan(ns->disk)) + pr_warn("%s: ignoring zoned namespace IO boundary\n", + ns->disk->disk_name); + return; + } + + blk_queue_chunk_sectors(ns->queue, iob); +} + static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) { unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK; struct nvme_ns *ns = disk->private_data; struct nvme_ctrl *ctrl = ns->ctrl; int ret; - u32 iob; /* * If identify namespace failed, use default 512 byte block size so @@ -2038,12 +2096,6 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) return -ENODEV; } - if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && - is_power_of_2(ctrl->max_hw_sectors)) - iob = ctrl->max_hw_sectors; - else - iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); - ns->features = 0; ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); /* the PI implementation requires metadata equal t10 pi tuple size */ @@ -2075,8 +2127,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) } } - if (iob) - blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob)); + nvme_set_chunk_sectors(ns, id); nvme_update_disk_info(disk, ns, id); #ifdef CONFIG_NVME_MULTIPATH if (ns->head->disk) { @@ -2965,14 +3016,14 @@ static struct nvme_cel *nvme_find_cel(struct nvme_ctrl *ctrl, u8 csi) { struct nvme_cel *cel, *ret = NULL; - spin_lock(&ctrl->lock); + spin_lock_irq(&ctrl->lock); list_for_each_entry(cel, &ctrl->cels, entry) { if (cel->csi == csi) { ret = cel; break; } } - spin_unlock(&ctrl->lock); + spin_unlock_irq(&ctrl->lock); return ret; } @@ -2999,9 +3050,9 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, cel->csi = csi; - spin_lock(&ctrl->lock); + spin_lock_irq(&ctrl->lock); list_add_tail(&cel->entry, &ctrl->cels); - spin_unlock(&ctrl->lock); + spin_unlock_irq(&ctrl->lock); out: *log = &cel->log; return 0; @@ -3474,10 +3525,6 @@ static ssize_t nvme_sysfs_delete(struct device *dev, { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - /* Can't delete non-created controllers */ - if (!ctrl->created) - return -EBUSY; - if (device_remove_file_self(dev, attr)) nvme_delete_ctrl_sync(ctrl); return count; @@ -3654,6 +3701,10 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, return 0; if (a == &dev_attr_hostid.attr && !ctrl->opts) return 0; + if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts) + return 0; + if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts) + return 0; return a->mode; } @@ -4348,7 +4399,6 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) nvme_queue_scan(ctrl); nvme_start_queues(ctrl); } - ctrl->created = true; } 
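The nvme_set_chunk_sectors() helper introduced above centralizes the IO-boundary handling that previously sat inline in __nvme_revalidate_disk(): a boundary reported by the controller is only programmed as the queue's chunk size when it is a power of two and the namespace is not zoned; otherwise it is warned about once (on first scan) and ignored, rather than being rounded down as the old code did. A reduced sketch of that decision, using a hypothetical helper name and plain arithmetic in place of the block-layer calls:

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Returns the chunk size to program, or 0 to leave the queue unsplit. */
static unsigned int pick_chunk_sectors(unsigned int iob, bool zoned)
{
	if (!iob)
		return 0;	/* device reported no IO boundary */
	if (!is_power_of_2(iob))
		return 0;	/* unaligned boundary: warn once, then ignore */
	if (zoned)
		return 0;	/* the zone size already bounds the queue */
	return iob;
}

int main(void)
{
	printf("%u\n", pick_chunk_sectors(1024, false));	/* 1024 */
	printf("%u\n", pick_chunk_sectors(1000, false));	/* 0 */
	return 0;
}

Refusing a non-power-of-two boundary instead of rounding it down is the behavioral change here: a rounded value would split IOs at offsets the device never asked for, so the new code prefers to set no chunk size at all.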
EXPORT_SYMBOL_GPL(nvme_start_ctrl); @@ -4368,7 +4418,7 @@ static void nvme_free_ctrl(struct device *dev) struct nvme_subsystem *subsys = ctrl->subsys; struct nvme_cel *cel, *next; - if (subsys && ctrl->instance != subsys->instance) + if (!subsys || ctrl->instance != subsys->instance) ida_simple_remove(&nvme_instance_ida, ctrl->instance); list_for_each_entry_safe(cel, next, &ctrl->cels, entry) { @@ -4512,7 +4562,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_unfreeze); -void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) +int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) { struct nvme_ns *ns; @@ -4523,6 +4573,7 @@ void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) break; } up_read(&ctrl->namespaces_rwsem); + return timeout; } EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 4ec4829d62334bf042f1e96d2f001b104dcf4248..8575724734e02e5883f946d54cea6324a605fabf 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -565,10 +565,14 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, struct nvme_request *req = nvme_req(rq); /* - * If we are in some state of setup or teardown only allow - * internally generated commands. + * currently we have a problem sending passthru commands + * on the admin_q if the controller is not LIVE because we can't + * make sure that they are going out after the admin connect, + * controller enable and/or other commands in the initialization + * sequence. until the controller will be LIVE, fail with + * BLK_STS_RESOURCE so that they will be rescheduled. */ - if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD)) + if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD)) return false; /* @@ -576,9 +580,8 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, * which is require to set the queue live in the appropinquate states. 
*/ switch (ctrl->state) { - case NVME_CTRL_NEW: case NVME_CTRL_CONNECTING: - if (nvme_is_fabrics(req->cmd) && + if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && req->cmd->fabrics.fctype == nvme_fabrics_type_connect) return true; break; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index eae43bb444e03888c6c149874cab431c6e724763..e8ef42b9d50c233f4ab3cd6e841db393d22bccf2 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -2035,7 +2035,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) } __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); - if (!nvme_end_request(rq, status, result)) + if (!nvme_try_complete_req(rq, status, result)) nvme_fc_complete_rq(rq); check_error: @@ -2078,7 +2078,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { dev_err(ctrl->dev, "FCP Op failed - cmdiu dma mapping failed.\n"); - ret = EFAULT; + ret = -EFAULT; goto out_on_error; } @@ -2088,7 +2088,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { dev_err(ctrl->dev, "FCP Op failed - rspiu dma mapping failed.\n"); - ret = EFAULT; + ret = -EFAULT; } atomic_set(&op->state, FCPOP_STATE_IDLE); @@ -2160,6 +2160,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) struct nvme_fc_fcp_op *aen_op; int i; + cancel_work_sync(&ctrl->ctrl.async_event_work); aen_op = ctrl->aen_ops; for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { __nvme_fc_exit_request(ctrl, aen_op); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 3ded54d2c9c6ad41063d74ecb0ba6559d0e61c01..d4ba736c6c8905cda938db0d15ea4b540e50ccf0 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -65,51 +65,30 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, } } -bool nvme_failover_req(struct request *req) +void nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; - u16 status = nvme_req(req)->status; + u16 status = nvme_req(req)->status & 0x7ff; unsigned long flags; - switch (status & 0x7ff) { - case NVME_SC_ANA_TRANSITION: - case NVME_SC_ANA_INACCESSIBLE: - case NVME_SC_ANA_PERSISTENT_LOSS: - /* - * If we got back an ANA error we know the controller is alive, - * but not ready to serve this namespaces. The spec suggests - * we should update our general state here, but due to the fact - * that the admin and I/O queues are not serialized that is - * fundamentally racy. So instead just clear the current path, - * mark the the path as pending and kick of a re-read of the ANA - * log page ASAP. - */ - nvme_mpath_clear_current_path(ns); - if (ns->ctrl->ana_log_buf) { - set_bit(NVME_NS_ANA_PENDING, &ns->flags); - queue_work(nvme_wq, &ns->ctrl->ana_work); - } - break; - case NVME_SC_HOST_PATH_ERROR: - case NVME_SC_HOST_ABORTED_CMD: - /* - * Temporary transport disruption in talking to the controller. - * Try to send on a new path. - */ - nvme_mpath_clear_current_path(ns); - break; - default: - /* This was a non-ANA error so follow the normal error path. */ - return false; + nvme_mpath_clear_current_path(ns); + + /* + * If we got back an ANA error, we know the controller is alive but not + * ready to serve this namespace. Kick of a re-read of the ANA + * information page, and just try any other available path for now. 
+ */ + if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) { + set_bit(NVME_NS_ANA_PENDING, &ns->flags); + queue_work(nvme_wq, &ns->ctrl->ana_work); } spin_lock_irqsave(&ns->head->requeue_lock, flags); blk_steal_bios(&ns->head->requeue_list, req); spin_unlock_irqrestore(&ns->head->requeue_lock, flags); - blk_mq_end_request(req, 0); + blk_mq_end_request(req, 0); kblockd_schedule_work(&ns->head->requeue_work); - return true; } void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) @@ -233,7 +212,7 @@ static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head, static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, int node, struct nvme_ns *old) { - struct nvme_ns *ns, *found, *fallback = NULL; + struct nvme_ns *ns, *found = NULL; if (list_is_singular(&head->list)) { if (nvme_path_is_disabled(old)) @@ -252,18 +231,22 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, goto out; } if (ns->ana_state == NVME_ANA_NONOPTIMIZED) - fallback = ns; + found = ns; } - /* No optimized path found, re-check the current path */ + /* + * The loop above skips the current path for round-robin semantics. + * Fall back to the current path if either: + * - no other optimized path found and current is optimized, + * - no other usable path found and current is usable. + */ if (!nvme_path_is_disabled(old) && - old->ana_state == NVME_ANA_OPTIMIZED) { - found = old; - goto out; - } - if (!fallback) + (old->ana_state == NVME_ANA_OPTIMIZED || + (!found && old->ana_state == NVME_ANA_NONOPTIMIZED))) + return old; + + if (!found) return NULL; - found = fallback; out: rcu_assign_pointer(head->current_path[node], found); return found; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index ebb8c3ed388554e4b2717e6ba6b6f7c7df328ba4..9fd45ff656da81472f36d83dcf5f0bb7dab66063 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -307,7 +307,6 @@ struct nvme_ctrl { struct nvme_command ka_cmd; struct work_struct fw_act_work; unsigned long events; - bool created; #ifdef CONFIG_NVME_MULTIPATH /* asymmetric namespace access: */ @@ -523,7 +522,31 @@ static inline u32 nvme_bytes_to_numd(size_t len) return (len >> 2) - 1; } -static inline bool nvme_end_request(struct request *req, __le16 status, +static inline bool nvme_is_ana_error(u16 status) +{ + switch (status & 0x7ff) { + case NVME_SC_ANA_TRANSITION: + case NVME_SC_ANA_INACCESSIBLE: + case NVME_SC_ANA_PERSISTENT_LOSS: + return true; + default: + return false; + } +} + +static inline bool nvme_is_path_error(u16 status) +{ + /* check for a status code type of 'path related status' */ + return (status & 0x700) == 0x300; +} + +/* + * Fill in the status and result information from the CQE, and then figure out + * if blk-mq will need to use IPI magic to complete the request, and if yes do + * so. If not let the caller complete the request without an indirect function + * call. 
+ */ +static inline bool nvme_try_complete_req(struct request *req, __le16 status, union nvme_result result) { struct nvme_request *rq = nvme_req(req); @@ -581,7 +604,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl); void nvme_sync_queues(struct nvme_ctrl *ctrl); void nvme_unfreeze(struct nvme_ctrl *ctrl); void nvme_wait_freeze(struct nvme_ctrl *ctrl); -void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); +int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); void nvme_start_freeze(struct nvme_ctrl *ctrl); #define NVME_QID_ANY -1 @@ -629,7 +652,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys); void nvme_mpath_start_freeze(struct nvme_subsystem *subsys); void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, struct nvme_ctrl *ctrl, int *flags); -bool nvme_failover_req(struct request *req); +void nvme_failover_req(struct request *req); void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id); @@ -688,9 +711,8 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); } -static inline bool nvme_failover_req(struct request *req) +static inline void nvme_failover_req(struct request *req) { - return false; } static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) { diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index ba725ae47305ef618bdfadbd1d216bbb5ae36d85..899d2f4d7ab612057c46bf02734a02dfd8276ed9 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -120,7 +120,7 @@ struct nvme_dev { unsigned max_qid; unsigned io_queues[HCTX_MAX_TYPES]; unsigned int num_vecs; - u16 q_depth; + u32 q_depth; int io_sqes; u32 db_stride; void __iomem *bar; @@ -157,13 +157,13 @@ struct nvme_dev { static int io_queue_depth_set(const char *val, const struct kernel_param *kp) { int ret; - u16 n; + u32 n; - ret = kstrtou16(val, 10, &n); + ret = kstrtou32(val, 10, &n); if (ret != 0 || n < 2) return -EINVAL; - return param_set_ushort(val, kp); + return param_set_uint(val, kp); } static inline unsigned int sq_idx(unsigned int qid, u32 stride) @@ -195,7 +195,7 @@ struct nvme_queue { dma_addr_t sq_dma_addr; dma_addr_t cq_dma_addr; u32 __iomem *q_db; - u16 q_depth; + u32 q_depth; u16 cq_vector; u16 sq_tail; u16 cq_head; @@ -961,7 +961,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); - if (!nvme_end_request(req, cqe->status, cqe->result)) + if (!nvme_try_complete_req(req, cqe->status, cqe->result)) nvme_pci_complete_rq(req); } @@ -1244,13 +1244,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) switch (dev->ctrl.state) { case NVME_CTRL_CONNECTING: nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); - /* fall through */ + fallthrough; case NVME_CTRL_DELETING: dev_warn_ratelimited(dev->ctrl.device, "I/O %d QID %d timeout, disable controller\n", req->tag, nvmeq->qid); - nvme_dev_disable(dev, true); nvme_req(req)->flags |= NVME_REQ_CANCELLED; + nvme_dev_disable(dev, true); return BLK_EH_DONE; case NVME_CTRL_RESETTING: return BLK_EH_RESET_TIMER; @@ -1267,10 +1267,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) dev_warn(dev->ctrl.device, "I/O %d QID %d timeout, reset controller\n", req->tag, 
nvmeq->qid); + nvme_req(req)->flags |= NVME_REQ_CANCELLED; nvme_dev_disable(dev, false); nvme_reset_ctrl(&dev->ctrl); - nvme_req(req)->flags |= NVME_REQ_CANCELLED; return BLK_EH_DONE; } @@ -2320,7 +2320,7 @@ static int nvme_pci_enable(struct nvme_dev *dev) dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); - dev->q_depth = min_t(u16, NVME_CAP_MQES(dev->ctrl.cap) + 1, + dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, io_queue_depth); dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); @@ -2460,7 +2460,8 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) static int nvme_setup_prp_pools(struct nvme_dev *dev) { dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, - PAGE_SIZE, PAGE_SIZE, 0); + NVME_CTRL_PAGE_SIZE, + NVME_CTRL_PAGE_SIZE, 0); if (!dev->prp_page_pool) return -ENOMEM; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 44c76ffbb264cdde539ee514e070151467ec432c..9e378d0a0c01c157ad24ab62595510c0c4f31eeb 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -122,6 +122,7 @@ struct nvme_rdma_ctrl { struct sockaddr_storage src_addr; struct nvme_ctrl ctrl; + struct mutex teardown_lock; bool use_inline_data; u32 io_queues[HCTX_MAX_TYPES]; }; @@ -834,6 +835,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); } if (ctrl->async_event_sqe.data) { + cancel_work_sync(&ctrl->ctrl.async_event_work); nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); ctrl->async_event_sqe.data = NULL; @@ -975,7 +977,15 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) if (!new) { nvme_start_queues(&ctrl->ctrl); - nvme_wait_freeze(&ctrl->ctrl); + if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { + /* + * If we timed out waiting for freeze we are likely to + * be stuck. Fail the controller initialization just + * to be safe. 
+ */ + ret = -ENODEV; + goto out_wait_freeze_timed_out; + } blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, ctrl->ctrl.queue_count - 1); nvme_unfreeze(&ctrl->ctrl); @@ -983,6 +993,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) return 0; +out_wait_freeze_timed_out: + nvme_stop_queues(&ctrl->ctrl); + nvme_rdma_stop_io_queues(ctrl); out_cleanup_connect_q: if (new) blk_cleanup_queue(ctrl->ctrl.connect_q); @@ -997,6 +1010,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remove) { + mutex_lock(&ctrl->teardown_lock); blk_mq_quiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); if (ctrl->ctrl.admin_tagset) { @@ -1007,11 +1021,13 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, if (remove) blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_destroy_admin_queue(ctrl, remove); + mutex_unlock(&ctrl->teardown_lock); } static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { + mutex_lock(&ctrl->teardown_lock); if (ctrl->ctrl.queue_count > 1) { nvme_start_freeze(&ctrl->ctrl); nvme_stop_queues(&ctrl->ctrl); @@ -1025,6 +1041,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, nvme_start_queues(&ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, remove); } + mutex_unlock(&ctrl->teardown_lock); } static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) @@ -1180,6 +1197,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) return; + dev_warn(ctrl->ctrl.device, "starting error recovery\n"); queue_work(nvme_reset_wq, &ctrl->err_work); } @@ -1189,7 +1207,7 @@ static void nvme_rdma_end_request(struct nvme_rdma_request *req) if (!refcount_dec_and_test(&req->ref)) return; - if (!nvme_end_request(rq, req->status, req->result)) + if (!nvme_try_complete_req(rq, req->status, req->result)) nvme_rdma_complete_rq(rq); } @@ -1915,7 +1933,7 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_UNREACHABLE: nvme_rdma_destroy_queue_ib(queue); - /* fall through */ + fallthrough; case RDMA_CM_EVENT_ADDR_ERROR: dev_dbg(queue->ctrl->ctrl.device, "CM error event %d\n", ev->event); @@ -1946,6 +1964,22 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, return 0; } +static void nvme_rdma_complete_timed_out(struct request *rq) +{ + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); + struct nvme_rdma_queue *queue = req->queue; + struct nvme_rdma_ctrl *ctrl = queue->ctrl; + + /* fence other contexts that may complete the command */ + mutex_lock(&ctrl->teardown_lock); + nvme_rdma_stop_queue(queue); + if (!blk_mq_request_completed(rq)) { + nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; + blk_mq_complete_request(rq); + } + mutex_unlock(&ctrl->teardown_lock); +} + static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq, bool reserved) { @@ -1956,29 +1990,29 @@ nvme_rdma_timeout(struct request *rq, bool reserved) dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", rq->tag, nvme_rdma_queue_idx(queue)); - /* - * Restart the timer if a controller reset is already scheduled. Any - * timed out commands would be handled before entering the connecting - * state. 
@@ -1915,7 +1933,7 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	case RDMA_CM_EVENT_UNREACHABLE:
 		nvme_rdma_destroy_queue_ib(queue);
-		/* fall through */
+		fallthrough;
 	case RDMA_CM_EVENT_ADDR_ERROR:
 		dev_dbg(queue->ctrl->ctrl.device,
 			"CM error event %d\n", ev->event);
@@ -1946,6 +1964,22 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 	return 0;
 }
 
+static void nvme_rdma_complete_timed_out(struct request *rq)
+{
+	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_rdma_queue *queue = req->queue;
+	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+
+	/* fence other contexts that may complete the command */
+	mutex_lock(&ctrl->teardown_lock);
+	nvme_rdma_stop_queue(queue);
+	if (!blk_mq_request_completed(rq)) {
+		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+		blk_mq_complete_request(rq);
+	}
+	mutex_unlock(&ctrl->teardown_lock);
+}
+
 static enum blk_eh_timer_return
 nvme_rdma_timeout(struct request *rq, bool reserved)
 {
@@ -1956,29 +1990,29 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
 	dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
 		 rq->tag, nvme_rdma_queue_idx(queue));
 
-	/*
-	 * Restart the timer if a controller reset is already scheduled. Any
-	 * timed out commands would be handled before entering the connecting
-	 * state.
-	 */
-	if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
-		return BLK_EH_RESET_TIMER;
-
 	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
 		/*
-		 * Teardown immediately if controller times out while starting
-		 * or we are already started error recovery. all outstanding
-		 * requests are completed on shutdown, so we return BLK_EH_DONE.
+		 * If we are resetting, connecting or deleting we should
+		 * complete immediately because we may block controller
+		 * teardown or setup sequence
+		 * - ctrl disable/shutdown fabrics requests
+		 * - connect requests
+		 * - initialization admin requests
+		 * - I/O requests that entered after unquiescing and
+		 *   the controller stopped responding
+		 *
+		 * All other requests should be cancelled by the error
+		 * recovery work, so it's fine that we fail it here.
 		 */
-		flush_work(&ctrl->err_work);
-		nvme_rdma_teardown_io_queues(ctrl, false);
-		nvme_rdma_teardown_admin_queue(ctrl, false);
+		nvme_rdma_complete_timed_out(rq);
 		return BLK_EH_DONE;
 	}
 
-	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+	/*
+	 * LIVE state should trigger the normal error recovery which will
+	 * handle completing this request.
	 */
 	nvme_rdma_error_recovery(ctrl);
-
 	return BLK_EH_RESET_TIMER;
 }
 
@@ -2278,6 +2312,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		return ERR_PTR(-ENOMEM);
 	ctrl->ctrl.opts = opts;
 	INIT_LIST_HEAD(&ctrl->list);
+	mutex_init(&ctrl->teardown_lock);
 
 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
 		opts->trsvcid =
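The combination of teardown_lock, nvme_rdma_stop_queue() and blk_mq_request_completed() above is a fencing pattern: the timeout handler and the teardown paths both want to complete the same request, so they serialize on one mutex and only the first caller actually completes it. A stripped-down pthreads sketch of the idea (names are hypothetical; the driver uses mutex_lock() and blk_mq_complete_request()):

#include <pthread.h>
#include <stdbool.h>

struct req {
	bool completed;		/* blk_mq_request_completed() stand-in */
};

struct queue {
	pthread_mutex_t teardown_lock;
	bool stopped;
	struct req *rq;
};

/* Run by both the timeout handler and the teardown path. */
static void fence_and_complete(struct queue *q)
{
	pthread_mutex_lock(&q->teardown_lock);
	q->stopped = true;		/* stop_queue(): no new completions */
	if (!q->rq->completed)		/* lost the race? then do nothing */
		q->rq->completed = true;	/* complete exactly once */
	pthread_mutex_unlock(&q->teardown_lock);
}

Because both paths run the same locked sequence, a request can never be completed twice nor left dangling -- which the old flush_work()-based timeout handler could not guarantee.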
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 62fbaecdc960c30af87fc36b744d57411bd45dc6..8f4f29f18b8c9c954ecb129816a04e430eb77e3a 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -124,6 +124,7 @@ struct nvme_tcp_ctrl {
 	struct sockaddr_storage src_addr;
 
 	struct nvme_ctrl	ctrl;
+	struct mutex		teardown_lock;
 	struct work_struct	err_work;
 	struct delayed_work	connect_work;
 	struct nvme_tcp_request async_req;
@@ -464,6 +465,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
 		return;
 
+	dev_warn(ctrl->device, "starting error recovery\n");
 	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
 }
 
@@ -481,7 +483,7 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
 		return -EINVAL;
 	}
 
-	if (!nvme_end_request(rq, cqe->status, cqe->result))
+	if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
 		nvme_complete_rq(rq);
 	queue->nr_cqe++;
 
@@ -672,7 +674,7 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 {
 	union nvme_result res = {};
 
-	if (!nvme_end_request(rq, cpu_to_le16(status << 1), res))
+	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
 		nvme_complete_rq(rq);
 }
 
@@ -866,7 +868,6 @@ static void nvme_tcp_state_change(struct sock *sk)
 	case TCP_LAST_ACK:
 	case TCP_FIN_WAIT1:
 	case TCP_FIN_WAIT2:
-		/* fallthrough */
 		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
 		break;
 	default:
@@ -1527,7 +1528,6 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
 	if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
 		return;
-
 	__nvme_tcp_stop_queue(queue);
 }
 
@@ -1596,6 +1596,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
 {
 	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
+		cancel_work_sync(&ctrl->async_event_work);
 		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
 		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
 	}
@@ -1782,7 +1783,15 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 
 	if (!new) {
 		nvme_start_queues(ctrl);
-		nvme_wait_freeze(ctrl);
+		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
+			/*
+			 * If we timed out waiting for freeze we are likely to
+			 * be stuck.  Fail the controller initialization just
+			 * to be safe.
+			 */
+			ret = -ENODEV;
+			goto out_wait_freeze_timed_out;
+		}
 		blk_mq_update_nr_hw_queues(ctrl->tagset,
 			ctrl->queue_count - 1);
 		nvme_unfreeze(ctrl);
@@ -1790,6 +1799,9 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 
 	return 0;
 
+out_wait_freeze_timed_out:
+	nvme_stop_queues(ctrl);
+	nvme_tcp_stop_io_queues(ctrl);
 out_cleanup_connect_q:
 	if (new)
 		blk_cleanup_queue(ctrl->connect_q);
@@ -1875,6 +1887,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
 		bool remove)
 {
+	mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
 	blk_mq_quiesce_queue(ctrl->admin_q);
 	nvme_tcp_stop_queue(ctrl, 0);
 	if (ctrl->admin_tagset) {
@@ -1885,13 +1898,16 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
 	if (remove)
 		blk_mq_unquiesce_queue(ctrl->admin_q);
 	nvme_tcp_destroy_admin_queue(ctrl, remove);
+	mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
 }
 
 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
 		bool remove)
 {
+	mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
 	if (ctrl->queue_count <= 1)
-		return;
+		goto out;
+	blk_mq_quiesce_queue(ctrl->admin_q);
 	nvme_start_freeze(ctrl);
 	nvme_stop_queues(ctrl);
 	nvme_tcp_stop_io_queues(ctrl);
@@ -1903,6 +1919,8 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
 	if (remove)
 		nvme_start_queues(ctrl);
 	nvme_tcp_destroy_io_queues(ctrl, remove);
+out:
+	mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
 }
 
 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
@@ -2149,40 +2167,55 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
 	nvme_tcp_queue_request(&ctrl->async_req, true, true);
 }
 
+static void nvme_tcp_complete_timed_out(struct request *rq)
+{
+	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
+
+	/* fence other contexts that may complete the command */
+	mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
+	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
+	if (!blk_mq_request_completed(rq)) {
+		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+		blk_mq_complete_request(rq);
+	}
+	mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
+}
+
 static enum blk_eh_timer_return
 nvme_tcp_timeout(struct request *rq, bool reserved)
 {
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
-	struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
+	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
 
-	/*
-	 * Restart the timer if a controller reset is already scheduled. Any
-	 * timed out commands would be handled before entering the connecting
-	 * state.
-	 */
-	if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
-		return BLK_EH_RESET_TIMER;
-
-	dev_warn(ctrl->ctrl.device,
+	dev_warn(ctrl->device,
 		"queue %d: timeout request %#x type %d\n",
 		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
 
-	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+	if (ctrl->state != NVME_CTRL_LIVE) {
 		/*
-		 * Teardown immediately if controller times out while starting
-		 * or we are already started error recovery. all outstanding
-		 * requests are completed on shutdown, so we return BLK_EH_DONE.
+		 * If we are resetting, connecting or deleting we should
+		 * complete immediately because we may block controller
+		 * teardown or setup sequence
+		 * - ctrl disable/shutdown fabrics requests
+		 * - connect requests
+		 * - initialization admin requests
+		 * - I/O requests that entered after unquiescing and
+		 *   the controller stopped responding
+		 *
+		 * All other requests should be cancelled by the error
+		 * recovery work, so it's fine that we fail it here.
 		 */
-		flush_work(&ctrl->err_work);
-		nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
-		nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
+		nvme_tcp_complete_timed_out(rq);
 		return BLK_EH_DONE;
 	}
 
-	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
-	nvme_tcp_error_recovery(&ctrl->ctrl);
-
+	/*
+	 * LIVE state should trigger the normal error recovery which will
+	 * handle completing this request.
+	 */
+	nvme_tcp_error_recovery(ctrl);
 	return BLK_EH_RESET_TIMER;
 }
 
@@ -2423,6 +2456,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 			nvme_tcp_reconnect_ctrl_work);
 	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
+	mutex_init(&ctrl->teardown_lock);
 
 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
 		opts->trsvcid =
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 74b2b61c773bb30e977b5bd8fd39edcdc8e252ae..37e1d7784e175c1f8713d76728089a9747ec74a1 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1136,6 +1136,7 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
 	up_write(&nvmet_config_sem);
 
 	kfree_rcu(new_model, rcuhead);
+	kfree(new_model_number);
 
 	return count;
 }
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b92f45f5cd5b1675d5c9190a0e59c694432e5ed6..b7b63330b5efdf1ecf2a5c905d9910ea8b213dc3 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -73,7 +73,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
 		status = NVME_SC_ACCESS_DENIED;
 		break;
 	case -EIO:
-		/* FALLTHRU */
+		fallthrough;
 	default:
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
 		status = NVME_SC_INTERNAL | NVME_SC_DNR;
@@ -397,6 +397,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 
 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 {
+	if (unlikely(ctrl->kato == 0))
+		return;
+
 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
 		ctrl->cntlid, ctrl->kato);
 
@@ -406,6 +409,9 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 
 static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 {
+	if (unlikely(ctrl->kato == 0))
+		return;
+
 	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
 
 	cancel_delayed_work_sync(&ctrl->ka_work);
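The kato == 0 guards are needed because a zero keep-alive timeout means keep-alives are disabled (the discovery-controller case), not "expire immediately"; without the check, arming the delayed work with a zero delay would fire the timer at once and tear down a healthy controller. The shape of the guard in isolation (a sketch with hypothetical names):

struct ctrl {
	unsigned int kato;	/* keep-alive timeout in seconds, 0 = off */
};

static void start_keep_alive(struct ctrl *c)
{
	if (c->kato == 0)
		return;		/* disabled: never arm the timer */
	/* schedule_delayed_work(&c->ka_work, c->kato * HZ) goes here */
}

The stop side gets the symmetric guard so it never cancels work that was never armed.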
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 55bafd56166a2e6948f29fd56ef9a46d9249cf9d..e6861cc10e7db36400af6faa36149242a71d7694 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2342,9 +2342,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 			return;
 		if (fcpreq->fcp_error ||
 		    fcpreq->transferred_length != fcpreq->transfer_length) {
-			spin_lock(&fod->flock);
+			spin_lock_irqsave(&fod->flock, flags);
 			fod->abort = true;
-			spin_unlock(&fod->flock);
+			spin_unlock_irqrestore(&fod->flock, flags);
 
 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
 			return;
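nvmet_fc_fod_op_done() can be invoked from the LLDD's completion path in hard-IRQ context, where a plain spin_lock() on a lock also taken with interrupts enabled can deadlock a CPU against itself; the irqsave variant disables local interrupts and restores the saved state on unlock. A kernel-style sketch of the rule (compiles only in a kernel tree; the function name is made up):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(flock);

/* may be called from hard-IRQ context */
static void mark_aborted(bool *abort)
{
	unsigned long flags;

	/*
	 * If this IRQ fired while the same CPU already held 'flock'
	 * (taken elsewhere with plain spin_lock()), spin_lock() here
	 * would spin forever. irqsave closes that window.
	 */
	spin_lock_irqsave(&flock, flags);
	*abort = true;
	spin_unlock_irqrestore(&flock, flags);
}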
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index c97e60b71bbc762004f32212383b43b265f58da5..3da067a8311e52158b38abc4ac1725d371b75035 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -812,7 +812,7 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
 			break;
 
 		/* Fall-Thru to RSP handling */
-		/* FALLTHRU */
+		fallthrough;
 
 	case NVMET_FCOP_RSP:
 		if (fcpreq) {
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 3dd6f566a240f147a4e12561175d927de564641c..125dde3f410ee711de2e90009eb76f0c3e0d5255 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -139,7 +139,6 @@ static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
 		break;
 	case BLK_STS_IOERR:
-		/* fallthru */
 	default:
 		status = NVME_SC_INTERNAL | NVME_SC_DNR;
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 4884ef1e46a2817743d06fa6d67b40da2ec953dc..0d6008cf66a2ada502dcbd6d71127f33e9ed90b9 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -115,7 +115,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
 			return;
 		}
 
-		if (!nvme_end_request(rq, cqe->status, cqe->result))
+		if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
 			nvme_loop_complete_rq(rq);
 	}
 }
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 89d91dc999a6db57ca56fdc561d84aa51a42ae02..8bd7f656e240b294f47c5f533976ce6ba8b1dfa5 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -165,7 +165,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 
 	req->cqe->result = nvme_req(rq)->result;
 	nvmet_req_complete(req, status);
-	blk_put_request(rq);
+	blk_mq_free_request(rq);
 }
 
 static void nvmet_passthru_req_done(struct request *rq,
@@ -175,7 +175,7 @@ static void nvmet_passthru_req_done(struct request *rq,
 
 	req->cqe->result = nvme_req(rq)->result;
 	nvmet_req_complete(req, nvme_req(rq)->status);
-	blk_put_request(rq);
+	blk_mq_free_request(rq);
 }
 
 static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
@@ -230,7 +230,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 	if (unlikely(!ns)) {
 		pr_err("failed to get passthru ns nsid:%u\n", nsid);
 		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
-		goto fail_out;
+		goto out;
 	}
 
 	q = ns->queue;
@@ -238,16 +238,15 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 
 	rq = nvme_alloc_request(q, req->cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
 	if (IS_ERR(rq)) {
-		rq = NULL;
 		status = NVME_SC_INTERNAL;
-		goto fail_out;
+		goto out_put_ns;
 	}
 
 	if (req->sg_cnt) {
 		ret = nvmet_passthru_map_sg(req, rq);
 		if (unlikely(ret)) {
 			status = NVME_SC_INTERNAL;
-			goto fail_out;
+			goto out_put_req;
 		}
 	}
 
@@ -274,11 +273,13 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 
 	return;
 
-fail_out:
+out_put_req:
+	blk_mq_free_request(rq);
+out_put_ns:
 	if (ns)
 		nvme_put_ns(ns);
+out:
 	nvmet_req_complete(req, status);
-	blk_put_request(rq);
 }
 
 /*
@@ -326,6 +327,10 @@ static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
 
 u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
 {
+	/* Reject any commands with non-sgl flags set (ie. fused commands) */
+	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+		return NVME_SC_INVALID_FIELD;
+
 	switch (req->cmd->common.opcode) {
 	case nvme_cmd_resv_register:
 	case nvme_cmd_resv_report:
@@ -396,6 +401,10 @@ static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
 
 u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
 {
+	/* Reject any commands with non-sgl flags set (ie. fused commands) */
+	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+		return NVME_SC_INVALID_FIELD;
+
 	/*
 	 * Passthru all vendor specific commands
 	 */
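The passthru rework replaces one catch-all fail_out label -- which had to cope with rq being NULL or ns half-initialized, and previously put a NULL request -- with the canonical kernel error ladder: one label per acquired resource, unwound in reverse order of acquisition. The bare pattern looks like this (userspace sketch, hypothetical resources):

#include <stdlib.h>

struct ns { int id; };
struct rq { int tag; };

static struct ns *get_ns(void) { return calloc(1, sizeof(struct ns)); }
static void put_ns(struct ns *ns) { free(ns); }
static struct rq *alloc_rq(void) { return calloc(1, sizeof(struct rq)); }
static void free_rq(struct rq *rq) { free(rq); }
static int map_sg(struct rq *rq) { (void)rq; return 0; }

static int execute_cmd(void)
{
	struct ns *ns;
	struct rq *rq;
	int status = 0;

	ns = get_ns();
	if (!ns) {
		status = -1;
		goto out;		/* nothing acquired yet */
	}

	rq = alloc_rq();
	if (!rq) {
		status = -1;
		goto out_put_ns;	/* only the ns to release */
	}

	if (map_sg(rq)) {
		status = -1;
		goto out_put_req;	/* release rq, then ns */
	}

	return 0;			/* success: ownership handed off */

out_put_req:
	free_rq(rq);
out_put_ns:
	put_ns(ns);
out:
	return status;
}

Each failure site jumps to exactly the label matching what it holds, so no path can free an unacquired resource or leak an acquired one.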
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3ccb59260b4abbafe02c2578e1a04a040eb5f28f..ae6620489457d66c6f9074e616905d49bcea920d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1758,7 +1758,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
 			schedule_delayed_work(&port->repair_work, 0);
 			break;
 		}
-		/* FALLTHROUGH */
+		fallthrough;
 	case RDMA_CM_EVENT_DISCONNECTED:
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
 		nvmet_rdma_queue_disconnect(queue);
@@ -1769,7 +1769,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
 	case RDMA_CM_EVENT_REJECTED:
 		pr_debug("Connection rejected: %s\n",
 			 rdma_reject_msg(cm_id, event->status));
-		/* FALLTHROUGH */
+		fallthrough;
 	case RDMA_CM_EVENT_UNREACHABLE:
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 		nvmet_rdma_queue_connect_fail(cm_id, queue);
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 9eda91162fe45d1aba836bf3811015e115f5aec0..8e0d766d272248aa32a7a85f1a4d6d54527755df 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -160,6 +160,11 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
 		struct nvmet_tcp_cmd *cmd)
 {
+	if (unlikely(!queue->nr_cmds)) {
+		/* We didn't allocate cmds yet, send 0xffff */
+		return USHRT_MAX;
+	}
+
 	return cmd - queue->cmds;
 }
 
@@ -866,7 +871,10 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
 	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
 	struct nvmet_tcp_cmd *cmd;
 
-	cmd = &queue->cmds[data->ttag];
+	if (likely(queue->nr_cmds))
+		cmd = &queue->cmds[data->ttag];
+	else
+		cmd = &queue->connect;
 
 	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
 		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 590493e04b012ac651ab6a520923e13a0088a496..da4f7341323f22451c2a1a5eb07bc3cf24832efd 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -128,15 +128,29 @@ static unsigned int of_bus_pci_get_flags(const __be32 *addr)
  * PCI bus specific translator
  */
 
+static bool of_node_is_pcie(struct device_node *np)
+{
+	bool is_pcie = of_node_name_eq(np, "pcie");
+
+	if (is_pcie)
+		pr_warn_once("%pOF: Missing device_type\n", np);
+
+	return is_pcie;
+}
+
 static int of_bus_pci_match(struct device_node *np)
 {
 	/*
	 * "pciex" is PCI Express
	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
	 * "ht" is hypertransport
+	 *
+	 * If none of the device_type values matches and the node name is
+	 * "pcie", accept the device as PCI (with a warning).
	 */
 	return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") ||
-		of_node_is_type(np, "vci") || of_node_is_type(np, "ht");
+		of_node_is_type(np, "vci") || of_node_is_type(np, "ht") ||
+		of_node_is_pcie(np);
 }
 
 static void of_bus_pci_count_cells(struct device_node *np,
@@ -985,6 +999,11 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
 			/* Don't error out as we'd break some existing DTs */
 			continue;
 		}
+		if (range.cpu_addr == OF_BAD_ADDR) {
+			pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
+			       range.bus_addr, node);
+			continue;
+		}
 		dma_offset = range.cpu_addr - range.bus_addr;
 
 		/* Take lower and upper limits */
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 9d7fb45b1786d2f666f86d3774ed554b33cb0025..3ca7543142bf36465a9df1ce4bfc3fa8a38b60e0 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -893,8 +893,10 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 	 * have OPP table for the device, while others don't and
 	 * opp_set_rate() just needs to behave like clk_set_rate().
 	 */
-	if (!_get_opp_count(opp_table))
-		return 0;
+	if (!_get_opp_count(opp_table)) {
+		ret = 0;
+		goto put_opp_table;
+	}
 
 	if (!opp_table->required_opp_tables && !opp_table->regulators &&
 	    !opp_table->paths) {
@@ -905,7 +907,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 
 		ret = _set_opp_bw(opp_table, NULL, dev, true);
 		if (ret)
-			return ret;
+			goto put_opp_table;
 
 		if (opp_table->regulator_enabled) {
 			regulator_disable(opp_table->regulators[0]);
@@ -932,10 +934,13 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 
 	/* Return early if nothing to do */
 	if (old_freq == freq) {
-		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
-			__func__, freq);
-		ret = 0;
-		goto put_opp_table;
+		if (!opp_table->required_opp_tables && !opp_table->regulators &&
+		    !opp_table->paths) {
+			dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+				__func__, freq);
+			ret = 0;
+			goto put_opp_table;
+		}
 	}
 
 	/*
@@ -1291,13 +1296,19 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
-void _opp_remove_all_static(struct opp_table *opp_table)
+bool _opp_remove_all_static(struct opp_table *opp_table)
 {
 	struct dev_pm_opp *opp, *tmp;
+	bool ret = true;
 
 	mutex_lock(&opp_table->lock);
 
-	if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps)
+	if (!opp_table->parsed_static_opps) {
+		ret = false;
+		goto unlock;
+	}
+
+	if (--opp_table->parsed_static_opps)
 		goto unlock;
 
 	list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
@@ -1307,6 +1318,8 @@ void _opp_remove_all_static(struct opp_table *opp_table)
 
 unlock:
 	mutex_unlock(&opp_table->lock);
+
+	return ret;
 }
 
 /**
@@ -2409,13 +2422,15 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev)
 		return;
 	}
 
-	_opp_remove_all_static(opp_table);
+	/*
+	 * Drop the extra reference only if the OPP table was successfully
+	 * added with dev_pm_opp_of_add_table() earlier.
+	 */
+	if (_opp_remove_all_static(opp_table))
+		dev_pm_opp_put_opp_table(opp_table);
 
 	/* Drop reference taken by _find_opp_table() */
 	dev_pm_opp_put_opp_table(opp_table);
-
-	/* Drop reference taken while the OPP table was added */
-	dev_pm_opp_put_opp_table(opp_table);
 }
 
 /**
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index e51646ff279eb8e8b223b36df167115fc97869b0..c3fcd571e446ded1046da28312ba4d31868191f2 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -212,7 +212,7 @@ struct opp_table {
 
 /* Routines internal to opp core */
 void dev_pm_opp_get(struct dev_pm_opp *opp);
-void _opp_remove_all_static(struct opp_table *opp_table);
+bool _opp_remove_all_static(struct opp_table *opp_table);
 void _get_opp_table_kref(struct opp_table *opp_table);
 int _get_opp_count(struct opp_table *opp_table);
 struct opp_table *_find_opp_table(struct device *dev);
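_opp_remove_all_static() now tells its caller whether a static-OPP reference actually existed, so _dev_pm_opp_find_and_remove_table() drops the matching table reference only when one was really taken; unconditionally dropping two references, as before, could underflow the table's kref when no static OPPs had ever been added. The general shape, reduced to a plain counter (a sketch, not the real kref API):

#include <stdbool.h>
#include <assert.h>

struct table {
	int refs;		/* kref stand-in */
	int parsed_static;	/* times static OPPs were added */
};

static void put_table(struct table *t)
{
	assert(t->refs > 0);	/* the underflow this pattern prevents */
	t->refs--;
}

/* Returns true only when a matching add-side reference exists. */
static bool remove_all_static(struct table *t)
{
	if (!t->parsed_static)
		return false;	/* never added: no reference to drop */
	--t->parsed_static;	/* free the OPP list when this hits zero */
	return true;
}

static void find_and_remove(struct table *t)
{
	if (remove_all_static(t))
		put_table(t);	/* ref taken when static OPPs were added */
	put_table(t);		/* ref taken by the table lookup itself */
}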
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index f28d6a3c5a684adf789ef89b271477ade45a736b..4547ac44c8d48d6f8a42bcbdd6c51c44a7611f01 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -260,7 +260,7 @@ static void parport_ieee1284_terminate (struct parport *port)
 			port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
 		}
 
-		/* fall through */
+		fallthrough;
 
 	default:
 		/* Terminate from all other modes. */
@@ -598,7 +598,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
 	case IEEE1284_MODE_NIBBLE:
 	case IEEE1284_MODE_BYTE:
 		parport_negotiate (port, IEEE1284_MODE_COMPAT);
-		/* fall through */
+		fallthrough;
 	case IEEE1284_MODE_COMPAT:
 		pr_debug("%s: Using compatibility mode\n", port->name);
 		fn = port->ops->compat_write_data;
@@ -702,7 +702,7 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
 		if (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) {
 			return -EIO;
 		}
-		/* fall through - to NIBBLE */
+		fallthrough;	/* to NIBBLE */
 	case IEEE1284_MODE_NIBBLE:
 		pr_debug("%s: Using nibble mode\n", port->name);
 		fn = port->ops->nibble_read_data;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 77e37e3cb3a091c65d31023ac3d850dd5559ecf2..eda4ded4d5e52a11f357e249a6826d893d227ceb 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1647,7 +1647,7 @@ static int parport_ECP_supported(struct parport *pb)
 		break;
 	default:
 		pr_warn("0x%lx: Unknown implementation ID\n", pb->base);
-		/* Fall through - Assume 1 */
+		fallthrough;	/* Assume 1 */
 	case 1:
 		pword = 1;
 	}
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 90df28c7cb0cf0f225da68427214a81ed875790b..5fef2613b223ad2188b833f29f7db850b3ebc8b2 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -439,7 +439,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
 		break;
-	case IMX6QP:		/* FALLTHROUGH */
+	case IMX6QP:
 	case IMX6Q:
 		/* power up core phy and enable ref clock */
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
@@ -642,7 +642,7 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
 				   IMX6SX_GPR12_PCIE_RX_EQ_2);
-		/* FALLTHROUGH */
+		fallthrough;
 	default:
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
@@ -1105,7 +1105,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
 			dev_err(dev, "pcie_aux clock source missing or invalid\n");
 			return PTR_ERR(imx6_pcie->pcie_aux);
 		}
-		/* fall through */
+		fallthrough;
 	case IMX7D:
 		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
 			imx6_pcie->controller_id = 1;
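The dozens of hunks in this series that turn /* fall through */ comments into fallthrough; are not cosmetic: fallthrough expands to __attribute__((__fallthrough__)) on compilers that support it, so the intent survives preprocessing and -Wimplicit-fallthrough can flag any case label reached by falling through without it. A self-contained sketch (the macro below is a simplified version of the kernel's definition in compiler_attributes.h):

#include <stdio.h>

#if defined(__has_attribute) && __has_attribute(__fallthrough__)
# define fallthrough __attribute__((__fallthrough__))
#else
# define fallthrough do {} while (0)	/* quiet no-op fallback */
#endif

/* Mirrors the dell-laptop keyboard-timeout conversion above. */
static int to_seconds(int unit, int value)
{
	switch (unit) {
	case 2:			/* days */
		value *= 24;
		fallthrough;
	case 1:			/* hours */
		value *= 60;
		fallthrough;
	case 0:			/* minutes */
		value *= 60;
		break;
	default:
		return -1;
	}
	return value;
}

int main(void)
{
	printf("%d\n", to_seconds(2, 1));	/* 86400: one day */
	return 0;
}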
diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c
index c9530038ca9a53fcc66c0d9e792dc0072a522e84..afde4aa8f6dcbbf629612376ac377dc4ff8b7ae9 100644
--- a/drivers/pci/controller/pci-rcar-gen2.c
+++ b/drivers/pci/controller/pci-rcar-gen2.c
@@ -223,7 +223,7 @@ static void rcar_pci_setup(struct rcar_pci_priv *priv)
 		pr_warn("unknown window size %ld - defaulting to 256M\n",
 			window_size);
 		window_size = SZ_256M;
-		/* fall-through */
+		fallthrough;
 	case SZ_256M:
 		val |= RCAR_USBCTR_PCIAHB_WIN1_256M;
 		break;
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index 5c93aa14f0de21ff008345037b2307fbeb7c3ed7..ae9acc77d14f1def561e80112a4b414f1c9af1b8 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -1941,7 +1941,7 @@ static int __init update_bridge_ranges(struct bus_node **bus)
 					break;
 				case PCI_HEADER_TYPE_BRIDGE:
 					function = 0x8;
-					/* fall through */
+					fallthrough;
 				case PCI_HEADER_TYPE_MULTIBRIDGE:
 					/* We assume here that only 1 bus behind the bridge
 					   TO DO: add functionality for several:
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 6503d15effbbd314b4cfbc60358262fbbb23193f..9f85815b4f53403dd7f5dd3a0d23384232c25e0c 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -236,7 +236,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
 	switch (ctrl->state) {
 	case BLINKINGOFF_STATE:
 		cancel_delayed_work(&ctrl->button_work);
-		/* fall through */
+		fallthrough;
 	case ON_STATE:
 		ctrl->state = POWEROFF_STATE;
 		mutex_unlock(&ctrl->state_lock);
@@ -265,7 +265,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
 	switch (ctrl->state) {
 	case BLINKINGON_STATE:
 		cancel_delayed_work(&ctrl->button_work);
-		/* fall through */
+		fallthrough;
 	case OFF_STATE:
 		ctrl->state = POWERON_STATE;
 		mutex_unlock(&ctrl->state_lock);
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index b59f84918fe060058d0f74c0e89fa39491b4f8d6..c9e790c74051f6215b7c8f1057234f2a169ee965 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -83,21 +83,19 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
 	struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
 					     hotplug_slot);
 	struct pci_dev *pdev;
-	struct zpci_bus *zbus = zdev->zbus;
 	int rc;
 
 	if (!zpci_fn_configured(zdev->state))
 		return -EIO;
 
-	pdev = pci_get_slot(zbus->bus, zdev->devfn);
-	if (pdev) {
-		if (pci_num_vf(pdev))
-			return -EBUSY;
-
-		pci_stop_and_remove_bus_device_locked(pdev);
+	pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+	if (pdev && pci_num_vf(pdev)) {
 		pci_dev_put(pdev);
+		return -EBUSY;
 	}
 
+	zpci_remove_device(zdev);
+
 	rc = zpci_disable_device(zdev);
 	if (rc)
 		return rc;
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index afdc52d1cae75ede21970718e965601a30ee22ea..65502e3f7b4fb68aa43491d4fa94b2bffba61dd7 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -642,7 +642,7 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot)
 	switch (p_slot->state) {
 	case BLINKINGON_STATE:
 		cancel_delayed_work(&p_slot->work);
-		/* fall through */
+		fallthrough;
 	case STATIC_STATE:
 		p_slot->state = POWERON_STATE;
 		mutex_unlock(&p_slot->lock);
@@ -678,7 +678,7 @@ int shpchp_sysfs_disable_slot(struct slot *p_slot)
 	switch (p_slot->state) {
 	case BLINKINGOFF_STATE:
 		cancel_delayed_work(&p_slot->work);
-		/* fall through */
+		fallthrough;
 	case STATIC_STATE:
 		p_slot->state = POWEROFF_STATE;
 		mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 64ebed129dbf5fee4c312cd4aaa9327f4eca3f5f..f357f9a32b3a57256abdcf24b8172e69a9bdfb37 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -556,13 +556,14 @@ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
 		return -1;
 
 	for (i = 0; i < num_clients; i++) {
-		if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
-		    clients[i]->dma_ops == &dma_virt_ops) {
+#ifdef CONFIG_DMA_VIRT_OPS
+		if (clients[i]->dma_ops == &dma_virt_ops) {
 			if (verbose)
 				dev_warn(clients[i],
 					 "cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n");
 			return -1;
 		}
+#endif
 
 		pci_client = find_parent_pci_dev(clients[i]);
 		if (!pci_client) {
@@ -842,9 +843,10 @@ static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
 	 * this should never happen because it will be prevented
 	 * by the check in pci_p2pdma_distance_many()
 	 */
-	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
-			 dev->dma_ops == &dma_virt_ops))
+#ifdef CONFIG_DMA_VIRT_OPS
+	if (WARN_ON_ONCE(dev->dma_ops == &dma_virt_ops))
 		return 0;
+#endif
 
 	for_each_sg(sg, s, nents, i) {
 		paddr = sg_phys(s);
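The p2pdma hunks swap IS_ENABLED(CONFIG_DMA_VIRT_OPS) for a real preprocessor guard. IS_ENABLED() only turns into a constant 0 or 1, so the branch body must still parse, which means a declaration of dma_virt_ops has to be visible even in configurations where the symbol does not exist; the #ifdef removes the reference from the translation unit entirely. In miniature (stand-in names):

/* Without the config option, this declaration would not exist ... */
#ifdef CONFIG_DMA_VIRT_OPS
extern const int dma_virt_ops;
#endif

static int uses_virt_ops(const int *ops)
{
#ifdef CONFIG_DMA_VIRT_OPS
	/* the reference compiles only when the declaration is real */
	return ops == &dma_virt_ops;
#else
	/*
	 * An IS_ENABLED()-style 'if (0 && ops == &dma_virt_ops)' would
	 * fail to build right here: constant folding happens after the
	 * parser has already required dma_virt_ops to be declared.
	 */
	(void)ops;
	return 0;
#endif
}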
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a458c46d7e39d13c722d733cebc1c80d7f4b3ae1..e39c5499770ff4377766a72ccbb165e2bdbb917e 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1049,7 +1049,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
 		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
 			need_restore = true;
-		/* Fall-through - force to D0 */
+		fallthrough;	/* force to D0 */
 	default:
 		pmcsr = 0;
 		break;
@@ -2541,7 +2541,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
 	case PCI_D2:
 		if (pci_no_d1d2(dev))
 			break;
-		/* else, fall through */
+		fallthrough;
 	default:
 		target_state = state;
 	}
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index bd2b691fa7a3320ccab98cf0f0bc64b470fd81af..d35186b01d98380d7458734e2b83dce93ebe35ee 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -231,7 +231,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
 		}
 		/* If arch decided it can't, fall through... */
 #endif /* HAVE_PCI_MMAP */
-		/* fall through */
+		fallthrough;
 	default:
 		ret = -EINVAL;
 		break;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bdf9b52567e030640b5ee98ad9e1cded7be37f72..2a589b6d6ed85ca9644dbe85e08ab7bf85455cb2 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1730,7 +1730,7 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
 	case PCI_DEVICE_ID_JMICRON_JMB366:
 		/* Redirect IDE second PATA port to the right spot */
 		conf5 |= (1 << 24);
-		/* Fall through */
+		fallthrough;
 	case PCI_DEVICE_ID_JMICRON_JMB361:
 	case PCI_DEVICE_ID_JMICRON_JMB363:
 	case PCI_DEVICE_ID_JMICRON_JMB369:
@@ -2224,7 +2224,7 @@ static void quirk_netmos(struct pci_dev *dev)
 		if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
 				dev->subsystem_device == 0x0299)
 			return;
-		/* else, fall through */
+		fallthrough;
 	case PCI_DEVICE_ID_NETMOS_9735:
 	case PCI_DEVICE_ID_NETMOS_9745:
 	case PCI_DEVICE_ID_NETMOS_9845:
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 3951e02b7dedad9dded202f4e79bf9a124bb3acf..2ce636937c6eaf5b2c23045a1de00d368fb8cb01 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1253,7 +1253,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
 			additional_mmio_size = pci_hotplug_mmio_size;
 			additional_mmio_pref_size = pci_hotplug_mmio_pref_size;
 		}
-		/* Fall through */
+		fallthrough;
 	default:
 		pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
 			     additional_io_size, realloc_head);
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index fab267e359e74004c3881ede7b32780893bc8d35..c0e85be598c11272c1b2c3a0e508f8d876257450 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1096,7 +1096,7 @@ static void __ref pcifront_backend_changed(struct xenbus_device *xdev,
 	case XenbusStateClosed:
 		if (xdev->state == XenbusStateClosed)
 			break;
-		/* fall through - Missed the backend's CLOSING state. */
+		fallthrough;	/* Missed the backend's CLOSING state */
 	case XenbusStateClosing:
 		dev_warn(&xdev->dev, "backend going away!\n");
 		pcifront_try_disconnect(pdev);
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 590e594092f25f080314d08316dd26b2b2831f8b..a7c7c7cd232698d53c1e8c8b1c04947af94d97e0 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -255,10 +255,10 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
 	switch (state->Vcc) {
 	case 50:
 		++v;
-		/* fall through */
+		fallthrough;
 	case 33:
 		++v;
-		/* fall through */
+		fallthrough;
 	case 0:
 		break;
 	default:
@@ -269,11 +269,11 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
 	switch (state->Vpp) {
 	case 12:
 		++p;
-		/* fall through */
+		fallthrough;
 	case 33:
 	case 50:
 		++p;
-		/* fall through */
+		fallthrough;
 	case 0:
 		break;
 	default:
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 7b7d23f2571390c3715ac778ef555873cd8a2eab..a0a71c1df042abf25ff8935aa95ca80f7a1565bf 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1404,7 +1404,7 @@ static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
 		break;
 	case CCN_TYPE_SBAS:
 		ccn->sbas_present = 1;
-		/* Fall-through */
+		fallthrough;
 	default:
 		component = &ccn->node[id];
 		break;
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index e51ddb6d63eda31acbd1a78c6060ff2e223ec44e..cc00915ad6d19aa957bbf718f3f7a1e49496c11d 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1002,7 +1002,7 @@ static void __arm_spe_pmu_dev_probe(void *info)
 	default:
 		dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
 			 fld);
-		/* Fallthrough */
+		fallthrough;
 	case 8:
 		spe_pmu->min_period = 4096;
 	}
@@ -1021,7 +1021,7 @@ static void __arm_spe_pmu_dev_probe(void *info)
 	default:
 		dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
 			 fld);
-		/* Fallthrough */
+		fallthrough;
 	case 2:
 		spe_pmu->counter_sz = 12;
 	}
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
index 71f257b4a7f5d1f8a161ed579dc43e7affdea5e4..9061ece7ff6ab1e42aee9ee13528ee3dabf35a2c 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
@@ -505,9 +505,9 @@ static int qcom_ipq806x_usb_phy_probe(struct platform_device *pdev)
 	size = resource_size(res);
 	phy_dwc3->base = devm_ioremap(phy_dwc3->dev, res->start, size);
 
-	if (IS_ERR(phy_dwc3->base)) {
+	if (!phy_dwc3->base) {
 		dev_err(phy_dwc3->dev, "failed to map reg\n");
-		return PTR_ERR(phy_dwc3->base);
+		return -ENOMEM;
 	}
 
 	phy_dwc3->ref_clk = devm_clk_get(phy_dwc3->dev, "ref");
@@ -557,7 +557,6 @@ static struct platform_driver qcom_ipq806x_usb_phy_driver = {
 	.probe		= qcom_ipq806x_usb_phy_probe,
 	.driver		= {
 		.name	= "qcom-ipq806x-usb-phy",
-		.owner	= THIS_MODULE,
 		.of_match_table = qcom_ipq806x_usb_phy_table,
 	},
 };
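The ipq806x probe fix is a common API mix-up: devm_ioremap() returns NULL on failure, while devm_ioremap_resource() returns an ERR_PTR(); testing the former with IS_ERR() can never trigger, so a failed mapping sailed through and would be dereferenced later. A kernel-style reminder of the two conventions (compiles only against kernel headers; the helper name is made up):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *map_regs(struct device *dev, struct resource *res)
{
	void __iomem *base;

	/*
	 * devm_ioremap(): NULL on failure, so check for NULL.
	 * devm_ioremap_resource() is the variant that hands back an
	 * ERR_PTR() and wants IS_ERR()/PTR_ERR() instead.
	 */
	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return IOMEM_ERR_PTR(-ENOMEM);

	return base;
}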
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 562053ce9455536a5bf90a634805077f5765667e..6e6f992a9524043c8b7776fea4fde09d74e8d148 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -604,8 +604,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
 	QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
 	QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
 	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
-	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f),
-	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
 	QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
 	QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
 	QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
@@ -631,7 +631,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
 	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
 	QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
 	QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
-	QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa),
 	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
 	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
 	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
@@ -640,7 +639,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
 	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
 	QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
 	QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
-	QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7),
 };
 
 static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
@@ -648,6 +646,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
 	QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
 	QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
 	QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+	QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36),
+	QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a),
 };
 
 static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
@@ -658,7 +658,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
 	QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
 	QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
 	QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
-	QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
 };
 
 static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
@@ -2046,6 +2045,9 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
 	.pwrdn_ctrl		= SW_PWRDN,
 };
 
+static const char * const ipq8074_pciephy_clk_l[] = {
+	"aux", "cfg_ahb",
+};
 /* list of resets */
 static const char * const ipq8074_pciephy_reset_l[] = {
 	"phy", "common",
@@ -2063,8 +2065,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
 	.rx_tbl_num		= ARRAY_SIZE(ipq8074_pcie_rx_tbl),
 	.pcs_tbl		= ipq8074_pcie_pcs_tbl,
 	.pcs_tbl_num		= ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
-	.clk_list		= NULL,
-	.num_clks		= 0,
+	.clk_list		= ipq8074_pciephy_clk_l,
+	.num_clks		= ARRAY_SIZE(ipq8074_pciephy_clk_l),
 	.reset_list		= ipq8074_pciephy_reset_l,
 	.num_resets		= ARRAY_SIZE(ipq8074_pciephy_reset_l),
 	.vreg_list		= NULL,
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 4277f592684b6a8ab135928635d9b3e2e450e81c..904b80ab90090dd2abe598bccc65c525c9b126d9 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -77,6 +77,8 @@
 #define QSERDES_COM_CORECLK_DIV_MODE1			0x1bc
 
 /* Only for QMP V2 PHY - TX registers */
+#define QSERDES_TX_EMP_POST1_LVL			0x018
+#define QSERDES_TX_SLEW_CNTL				0x040
 #define QSERDES_TX_RES_CODE_LANE_OFFSET			0x054
 #define QSERDES_TX_DEBUG_BUS_SEL			0x064
 #define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN	0x068
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
index 61054272a7c8be306df65509793ae91fb76670d1..327df1a99f77325a1f229cc1638402f5e8bbe5d6 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -53,7 +53,7 @@ static int qcom_usb_hs_phy_set_mode(struct phy *phy,
 		case PHY_MODE_USB_OTG:
 		case PHY_MODE_USB_HOST:
 			val |= ULPI_INT_IDGRD;
-			/* fall through */
+			fallthrough;
 		case PHY_MODE_USB_DEVICE:
 			val |= ULPI_INT_SESS_VALID;
 		default:
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index a84e9f027fc4692b17ccee5ace4285e0ede991be..46ebdb1460a3de4753794adb2577e035e6666b9b 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -546,7 +546,7 @@ static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
 		rport->state = OTG_STATE_B_IDLE;
 		if (!vbus_attach)
 			rockchip_usb2phy_power_off(rport->phy);
-		/* fall through */
+		fallthrough;
 	case OTG_STATE_B_IDLE:
 		if (extcon_get_state(rphy->edev, EXTCON_USB_HOST) > 0) {
 			dev_dbg(&rport->phy->dev, "usb otg host connect\n");
@@ -754,11 +754,11 @@ static void rockchip_chg_detect_work(struct work_struct *work)
 			rphy->chg_type = POWER_SUPPLY_TYPE_USB_DCP;
 		else
 			rphy->chg_type = POWER_SUPPLY_TYPE_USB_CDP;
-		/* fall through */
+		fallthrough;
 	case USB_CHG_STATE_SECONDARY_DONE:
 		rphy->chg_state = USB_CHG_STATE_DETECTED;
 		delay = 0;
-		/* fall through */
+		fallthrough;
 	case USB_CHG_STATE_DETECTED:
 		/* put the controller in normal mode */
 		property_enable(base, &rphy->phy_cfg->chg_det.opmode, true);
@@ -835,7 +835,7 @@ static void rockchip_usb2phy_sm_work(struct work_struct *work)
 			dev_dbg(&rport->phy->dev, "FS/LS online\n");
 			break;
 		}
-		/* fall through */
+		fallthrough;
 	case PHY_STATE_CONNECT:
 		if (rport->suspended) {
 			dev_dbg(&rport->phy->dev, "Connected\n");
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index cb2dd3230fa76f1deab0adc01a308d185509d724..507f79d14adb80f461730fb86818cc70ce858cc1 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -22,10 +22,15 @@
 #include
 #include
 #include
+#include <linux/sys_soc.h>
 
 #define USB2PHY_ANA_CONFIG1		0x4c
 #define USB2PHY_DISCON_BYP_LATCH	BIT(31)
 
+#define USB2PHY_CHRG_DET			0x14
+#define USB2PHY_CHRG_DET_USE_CHG_DET_REG	BIT(29)
+#define USB2PHY_CHRG_DET_DIS_CHG_DET		BIT(28)
+
 /* SoC Specific USB2_OTG register definitions */
 #define AM654_USB2_OTG_PD		BIT(8)
 #define AM654_USB2_VBUS_DET_EN		BIT(5)
@@ -43,6 +48,7 @@
 #define OMAP_USB2_HAS_START_SRP			BIT(0)
 #define OMAP_USB2_HAS_SET_VBUS			BIT(1)
 #define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT	BIT(2)
+#define OMAP_USB2_DISABLE_CHRG_DET		BIT(3)
 
 struct omap_usb {
 	struct usb_phy		phy;
@@ -236,6 +242,13 @@ static int omap_usb_init(struct phy *x)
 		omap_usb_writel(phy->phy_base, USB2PHY_ANA_CONFIG1, val);
 	}
 
+	if (phy->flags & OMAP_USB2_DISABLE_CHRG_DET) {
+		val = omap_usb_readl(phy->phy_base, USB2PHY_CHRG_DET);
+		val |= USB2PHY_CHRG_DET_USE_CHG_DET_REG |
+		       USB2PHY_CHRG_DET_DIS_CHG_DET;
+		omap_usb_writel(phy->phy_base, USB2PHY_CHRG_DET, val);
+	}
+
 	return 0;
 }
 
@@ -329,6 +342,26 @@ static const struct of_device_id omap_usb2_id_table[] = {
 };
 MODULE_DEVICE_TABLE(of, omap_usb2_id_table);
 
+static void omap_usb2_init_errata(struct omap_usb *phy)
+{
+	static const struct soc_device_attribute am65x_sr10_soc_devices[] = {
+		{ .family = "AM65X", .revision = "SR1.0" },
+		{ /* sentinel */ }
+	};
+
+	/*
+	 * Errata i2075: USB2PHY: USB2PHY Charger Detect is Enabled by
+	 * Default Without VBUS Presence.
+	 *
+	 * AM654x SR1.0 has a silicon bug due to which D+ is pulled high after
+	 * POR, which could cause enumeration failure with some USB hubs.
+	 * Disabling the USB2_PHY Charger Detect function will put D+
+	 * into the normal state.
+	 */
+	if (soc_device_match(am65x_sr10_soc_devices))
+		phy->flags |= OMAP_USB2_DISABLE_CHRG_DET;
+}
+
 static int omap_usb2_probe(struct platform_device *pdev)
 {
 	struct omap_usb	*phy;
@@ -366,14 +399,14 @@ static int omap_usb2_probe(struct platform_device *pdev)
 	phy->mask		= phy_data->mask;
 	phy->power_on		= phy_data->power_on;
 	phy->power_off		= phy_data->power_off;
+	phy->flags		= phy_data->flags;
 
-	if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-		phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
-		if (IS_ERR(phy->phy_base))
-			return PTR_ERR(phy->phy_base);
-		phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
-	}
+	omap_usb2_init_errata(phy);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(phy->phy_base))
+		return PTR_ERR(phy->phy_base);
 
 	phy->syscon_phy_power = syscon_regmap_lookup_by_phandle(node,
 							"syscon-phy-power");
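Keying the i2075 workaround off soc_device_match() scopes the erratum to the exact silicon revision that needs it, decided once at probe time, instead of penalizing every SoC that shares the driver. The pattern in isolation (kernel-style sketch; the family/revision strings are the ones from the patch, the flag name is made up):

#include <linux/bits.h>
#include <linux/sys_soc.h>

#define QUIRK_DISABLE_CHRG_DET	BIT(3)

static unsigned int errata_flags(void)
{
	static const struct soc_device_attribute am65x_sr10[] = {
		{ .family = "AM65X", .revision = "SR1.0" },
		{ /* sentinel */ }
	};
	unsigned int flags = 0;

	/* apply the D+ pull-up workaround only on affected silicon */
	if (soc_device_match(am65x_sr10))
		flags |= QUIRK_DISABLE_CHRG_DET;

	return flags;
}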
diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c
index 5e1d14e35f20b19a154e028a5d5ca52166e2c066..0d46706afd2da352e6825b6d8bb04467388c4bf7 100644
--- a/drivers/platform/olpc/olpc-xo175-ec.c
+++ b/drivers/platform/olpc/olpc-xo175-ec.c
@@ -431,7 +431,7 @@ static void olpc_xo175_ec_complete(void *arg)
 			input_sync(priv->pwrbtn);
 			input_report_key(priv->pwrbtn, KEY_POWER, 0);
 			input_sync(priv->pwrbtn);
-			/* fall through */
+			fallthrough;
 		case EVENT_POWER_PRESS_WAKE:
 		case EVENT_TIMED_HOST_WAKE:
 			pm_wakeup_event(priv->pwrbtn->dev.parent,
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 60c18f21588dd5d94a0beb343e8da6d3120d2a7b..49f4b73be513f515af3838e4af451c2071e945a7 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1001,7 +1001,7 @@ static acpi_status WMID_get_u32(u32 *value, u32 cap)
 			*value = tmp & 0x1;
 			return 0;
 		}
-		/* fall through */
+		fallthrough;
 	default:
 		return AE_ERROR;
 	}
@@ -1328,7 +1328,7 @@ static acpi_status get_u32(u32 *value, u32 cap)
 			status = AMW0_get_u32(value, cap);
 			break;
 		}
-		/* fall through */
+		fallthrough;
 	case ACER_WMID:
 		status = WMID_get_u32(value, cap);
 		break;
@@ -1371,7 +1371,7 @@ static acpi_status set_u32(u32 value, u32 cap)
 
 			return AMW0_set_u32(value, cap);
 		}
-		/* fall through */
+		fallthrough;
 	case ACER_WMID:
 		return WMID_set_u32(value, cap);
 	case ACER_WMID_v2:
@@ -1381,7 +1381,7 @@ static acpi_status set_u32(u32 value, u32 cap)
 			return wmid_v2_set_u32(value, cap);
 		else if (wmi_has_guid(WMID_GUID2))
 			return WMID_set_u32(value, cap);
-		/* fall through */
+		fallthrough;
 	default:
 		return AE_BAD_PARAMETER;
 	}
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 5e9c2296931c9a750c7030add75168919ca1e6f1..70edc5bb3a146082599c348a07f5a1d930e88c25 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -1587,10 +1587,10 @@ static ssize_t kbd_led_timeout_store(struct device *dev,
 	switch (unit) {
 	case KBD_TIMEOUT_DAYS:
 		value *= 24;
-		/* fall through */
+		fallthrough;
 	case KBD_TIMEOUT_HOURS:
 		value *= 60;
-		/* fall through */
+		fallthrough;
 	case KBD_TIMEOUT_MINUTES:
 		value *= 60;
 		unit = KBD_TIMEOUT_SECONDS;
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
index ec515223f6549d8cbc8968d1f13780f21d65686a..d8afed5db94c59f69f58e63e24e5976e9fae2ee7 100644
--- a/drivers/platform/x86/surfacepro3_button.c
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -84,28 +84,28 @@ static void surface_button_notify(struct acpi_device *device, u32 event)
 	/* Power button press,release handle */
 	case SURFACE_BUTTON_NOTIFY_PRESS_POWER:
 		pressed = true;
-		/*fall through*/
+		fallthrough;
 	case SURFACE_BUTTON_NOTIFY_RELEASE_POWER:
 		key_code = KEY_POWER;
 		break;
 	/* Home button press,release handle */
 	case SURFACE_BUTTON_NOTIFY_PRESS_HOME:
 		pressed = true;
-		/*fall through*/
+		fallthrough;
 	case SURFACE_BUTTON_NOTIFY_RELEASE_HOME:
 		key_code = KEY_LEFTMETA;
 		break;
 	/* Volume up button press,release handle */
 	case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP:
 		pressed = true;
-		/*fall through*/
+		fallthrough;
 	case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP:
 		key_code = KEY_VOLUMEUP;
 		break;
 	/* Volume down button press,release handle */
 	case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN:
 		pressed = true;
-		/*fall through*/
+		fallthrough;
 	case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
 		key_code = KEY_VOLUMEDOWN;
 		break;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 4864a5c189d464a9ef4327416a7f9d0ae1aaca34..9c4df41687a39fee9275ef9d12d1fdf30b37e613 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -4060,7 +4060,7 @@ static bool hotkey_notify_6xxx(const u32 hkey,
 		 * AC status changed; can be triggered by plugging or
 		 * unplugging AC adapter, docking or undocking.
 		 */
-		/* fallthrough */
+		fallthrough;
 
 	case TP_HKEY_EV_KEY_NUMLOCK:
 	case TP_HKEY_EV_KEY_FN:
@@ -4176,7 +4176,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
 			known_ev = true;
 			break;
 		}
-		/* fallthrough - to default */
+		fallthrough;	/* to default */
 	default:
 		known_ev = false;
 	}
@@ -6266,7 +6266,7 @@ static int thermal_get_sensor(int idx, s32 *value)
 			idx -= 8;
 		}
 #endif
-		/* fallthrough */
+		fallthrough;
 	case TPACPI_THERMAL_TPEC_8:
 		if (idx <= 7) {
 			if (!acpi_ec_read(t + idx, &tmp))
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 36fff00af9ebd482d2dce7b10e25281767c6d188..e557d757c64708f6ebef5b6411c7917679ab6c86 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -2748,7 +2748,7 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
 			result = hci_write(dev, HCI_SYSTEM_EVENT, 1);
 			if (result == TOS_SUCCESS)
 				pr_notice("Re-enabled hotkeys\n");
-			/* Fall through */
+			fallthrough;
 		default:
 			retries--;
 			break;
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 9469fe182d02a1e333e5f305083bfdcc931c84d8..db65be0269206dd043acea31ed78c53533b519b6 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -748,7 +748,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
 				USB_CH_IP_CUR_LVL_1P5;
 			break;
 		}
-		/* else, fall through */
+		fallthrough;
 	case USB_STAT_HM_IDGND:
 		dev_err(di->dev, "USB Type - Charging not allowed\n");
 		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
@@ -2410,7 +2410,7 @@ static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
 		 * of 1sec for enabling charging
 		 */
 		msleep(1000);
-		/* Intentional fall through */
+		fallthrough;
 	case AB8500_BM_USB_STATE_CONFIGURED:
 		/*
 		 * USB is configured, enable charging with the charging
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 751c4f6c7487160d3725ecd1d3e723f58ec8fb2f..7eec415c82a3525b9dcbe83cc01aa8fe8e457e30 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -1542,7 +1542,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
 		ab8500_fg_discharge_state_to(di,
 			AB8500_FG_DISCHARGE_INITMEASURING);
 
-		/* Intentional fallthrough */
+		fallthrough;
 	case AB8500_FG_DISCHARGE_INITMEASURING:
 		/*
 		 * Discard a number of samples during startup.
@@ -1572,7 +1572,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
 		ab8500_fg_discharge_state_to(di,
 			AB8500_FG_DISCHARGE_RECOVERY);
 
-		/* Intentional fallthrough */
+		fallthrough;
 	case AB8500_FG_DISCHARGE_RECOVERY:
 		sleep_time = di->bm->fg_params->recovery_sleep_timer;
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index 2fb33a07879a3b6af8a53df920e47bca119cf0a1..175c4f3d795534143b231779dc72c74401ce3e6e 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -1419,7 +1419,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 		abx500_chargalg_stop_charging(di);
 		di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
 		abx500_chargalg_state_to(di, STATE_HANDHELD);
-		/* Intentional fallthrough */
+		fallthrough;
 
 	case STATE_HANDHELD:
 		break;
@@ -1435,7 +1435,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 		di->maintenance_chg = false;
 		abx500_chargalg_state_to(di, STATE_SUSPENDED);
 		power_supply_changed(di->chargalg_psy);
-		/* Intentional fallthrough */
+		fallthrough;
 
 	case STATE_SUSPENDED:
 		/* CHARGING is suspended */
@@ -1444,7 +1444,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 	case STATE_BATT_REMOVED_INIT:
 		abx500_chargalg_stop_charging(di);
 		abx500_chargalg_state_to(di, STATE_BATT_REMOVED);
-		/* Intentional fallthrough */
+		fallthrough;
 
 	case STATE_BATT_REMOVED:
 		if (!di->events.batt_rem)
@@ -1454,7 +1454,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 	case STATE_HW_TEMP_PROTECT_INIT:
 		abx500_chargalg_stop_charging(di);
 		abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
-		/* Intentional fallthrough */
+		fallthrough;
 
 	case STATE_HW_TEMP_PROTECT:
 		if (!di->events.main_thermal_prot &&
@@ -1465,7 +1465,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 	case STATE_OVV_PROTECT_INIT:
 		abx500_chargalg_stop_charging(di);
 		abx500_chargalg_state_to(di, STATE_OVV_PROTECT);
-		/* Intentional fallthrough */
+		fallthrough;
 
 	case STATE_OVV_PROTECT:
 		if (!di->events.vbus_ovv &&
@@ -1479,7 +1479,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 	case STATE_CHG_NOT_OK_INIT:
 		abx500_chargalg_stop_charging(di);
 		abx500_chargalg_state_to(di, STATE_CHG_NOT_OK);
-		/* Intentional fallthrough */
+		fallthrough;
 
 	case STATE_CHG_NOT_OK:
 		if (!di->events.mainextchnotok &&
@@ -1490,7 +1490,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 	case STATE_SAFETY_TIMER_EXPIRED_INIT:
 		abx500_chargalg_stop_charging(di);
 		abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
-		/* Intentional fallthrough */
+		fallthrough;
 
 	case STATE_SAFETY_TIMER_EXPIRED:
 		/* We exit this state when charger is removed */
@@ -1537,7 +1537,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 	case STATE_WAIT_FOR_RECHARGE_INIT:
 		abx500_chargalg_hold_charging(di);
 		abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
-		/* Intentional fallthrough */
+		fallthrough;
 
 	case STATE_WAIT_FOR_RECHARGE:
 		if (di->batt_data.percent <=
@@ -1558,7 +1558,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 			di->bm->batt_id].maint_a_cur_lvl);
 		abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
 		power_supply_changed(di->chargalg_psy);
-		/* Intentional fallthrough*/
+		fallthrough;
 
 	case STATE_MAINTENANCE_A:
 		if (di->events.maintenance_timer_expired) {
@@ -1578,7 +1578,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 			di->bm->batt_id].maint_b_cur_lvl);
 		abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
 		power_supply_changed(di->chargalg_psy);
-		/* Intentional fallthrough*/
+		fallthrough;
 
 	case STATE_MAINTENANCE_B:
 		if (di->events.maintenance_timer_expired) {
@@ -1597,7 +1597,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 		di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
 		abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
 		power_supply_changed(di->chargalg_psy);
-		/* Intentional fallthrough */
+		fallthrough;
 
 	case STATE_TEMP_LOWHIGH:
 		if (!di->events.btemp_lowhigh)
@@ -1607,7 +1607,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 	case STATE_WD_EXPIRED_INIT:
 		abx500_chargalg_stop_charging(di);
 		abx500_chargalg_state_to(di, STATE_WD_EXPIRED);
-		/* Intentional fallthrough */
+		fallthrough;
 
 	case STATE_WD_EXPIRED:
 		if (!di->events.ac_wd_expired &&
@@ -1618,7 +1618,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 	case STATE_TEMP_UNDEROVER_INIT:
 		abx500_chargalg_stop_charging(di);
 		abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
-		/* Intentional fallthrough */
+		fallthrough;
 
 	case STATE_TEMP_UNDEROVER:
 		if (!di->events.btemp_underover)
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index d01dc0332edc7826c085d9147d25bb0e4a751327..0eaa86c52874a2e3360be35e1d93f7cd83eaa9e0 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -349,7 +349,7 @@ static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power,
 	case 100000:
 		if (power->axp20x_id == AXP221_ID)
 			return -EINVAL;
-		/* fall through */
+		fallthrough;
 	case 500000:
 	case 900000:
 		val = (900000 - intval) / 400000;
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index 2a45e84447fe7cb9f2b9becbf31d2e56f087a242..d89e08efd2ad0f0719d32876605a5311af14af35 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -383,7 +383,7 @@ static int cros_usbpd_charger_get_prop(struct power_supply *psy,
 		 */
 		if (ec_device->mkbp_event_supported || port->psy_online)
 			break;
-		/* fall through */
+		fallthrough;
 	case POWER_SUPPLY_PROP_CURRENT_MAX:
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
diff --git a/drivers/power/supply/max8925_power.c b/drivers/power/supply/max8925_power.c
index 5fca4960f440ae00cc740f44e97b8c03470779fd..8878f9131184f716b5512c89125bff3b13cee02a 100644
--- a/drivers/power/supply/max8925_power.c
+++ b/drivers/power/supply/max8925_power.c
@@ -121,7 +121,7 @@ static irqreturn_t max8925_charger_handler(int irq, void *data)
 	case MAX8925_IRQ_VCHG_THM_OK_F:
 		/* Battery is not ready yet */
 		dev_dbg(chip->dev, "Battery temperature is out of range\n");
-		/* Fall through */
+		fallthrough;
 	case MAX8925_IRQ_VCHG_DC_OVP:
 		dev_dbg(chip->dev, "Error detection\n");
 		__set_charger(info, 0);
diff --git a/drivers/power/supply/wm831x_power.c b/drivers/power/supply/wm831x_power.c
index 65832bc229f6c7b67f14ffa85c57d68c3dfc0a6a..18b33f14dfeef635453d57015720ca329d82abd7 100644
--- a/drivers/power/supply/wm831x_power.c
+++ b/drivers/power/supply/wm831x_power.c
@@ -665,7 +665,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
 		break;
 	default:
 		dev_err(&pdev->dev, "Failed to find USB phy: %d\n", ret);
-		/* fall-through */
+		fallthrough;
 	case -EPROBE_DEFER:
 		goto err_bat_irq;
 		break;
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index 26923af574f430118067faaffbf2b0faaf90f067..e05cee457471bf1ff39db193930f5090309e1901 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -227,7 +227,7 @@ static irqreturn_t wm8350_charger_handler(int irq, void *data)
 	case WM8350_IRQ_EXT_USB_FB:
 	case WM8350_IRQ_EXT_WALL_FB:
 		wm8350_charger_config(wm8350, policy);
-		/* Fall through */
+		fallthrough;
 	case WM8350_IRQ_EXT_BAT_FB:
 		power_supply_changed(power->battery);
 		power_supply_changed(power->usb);
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 6f55aaef8afc993dec7c82ac7a5473eb94ef5c74..25c764905f9bd01255a2dd2f7f94050c1865b475 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1035,6 +1035,9 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&rapl_defaults_core),
 	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&rapl_defaults_core),
 	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&rapl_defaults_core),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&rapl_defaults_core),
+	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,		&rapl_defaults_core),
+	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		&rapl_defaults_core),
 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&rapl_defaults_spr_server),
 
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&rapl_defaults_byt),
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 24f04ffdd9861ccbdd64efb75835cc489d0c7e5b..9d66257e1da5c0b37607a34d49711b30efec2879 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -769,7 +769,7 @@ static int ps3av_auto_videomode(struct ps3av_pkt_av_get_hw_conf *av_hw_conf)
 		switch (info->monitor_type) {
 		case PS3AV_MONITOR_TYPE_DVI:
 			dvi = PS3AV_MODE_DVI;
-			/* fall through */
+			fallthrough;
 		case PS3AV_MONITOR_TYPE_HDMI:
 			id = ps3av_hdmi_get_id(info);
 			break;
diff --git a/drivers/ps3/ps3av_cmd.c b/drivers/ps3/ps3av_cmd.c
index f0e650cc866e56566d05fedef4946ef83857d484..c22206652f06fd9dcaee9888ab6940fbec7389d6 100644
--- a/drivers/ps3/ps3av_cmd.c
+++ b/drivers/ps3/ps3av_cmd.c
@@ -693,11 +693,11 @@ void ps3av_cmd_set_audio_mode(struct ps3av_pkt_audio_mode *audio, u32 avport,
 	switch (ch) {
 	case PS3AV_CMD_AUDIO_NUM_OF_CH_8:
 		audio->audio_enable[3] = 1;
-		/* fall through */
+		fallthrough;
 	case PS3AV_CMD_AUDIO_NUM_OF_CH_6:
 		audio->audio_enable[2] = 1;
 		audio->audio_enable[1] = 1;
-		/* fall through */
+		fallthrough;
 	case PS3AV_CMD_AUDIO_NUM_OF_CH_2:
 	default:
 		audio->audio_enable[0] = 1;
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index 73aaae5574ed5623cbbd1d54c7d740ca9527a9c1..e020faff7da53199875467c50ed9dda27c8dac5c 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -142,16 +142,15 @@ static int idtcm_strverscmp(const char *ver1, const char *ver2)
 	return result;
 }
 
-static int idtcm_xfer(struct idtcm *idtcm,
-		      u8 regaddr,
-		      u8 *buf,
-		      u16 count,
-		      bool write)
+static int idtcm_xfer_read(struct idtcm *idtcm,
+			   u8 regaddr,
+			   u8 *buf,
+			   u16 count)
 {
 	struct i2c_client *client = idtcm->client;
 	struct i2c_msg msg[2];
 	int cnt;
-	char *fmt = "i2c_transfer failed at %d in %s for %s, at addr: %04X!\n";
+	char *fmt = "i2c_transfer failed at %d in %s, at addr: %04X!\n";
 
 	msg[0].addr = client->addr;
 	msg[0].flags = 0;
@@ -159,7 +158,7 @@ static int idtcm_xfer(struct idtcm *idtcm,
 	msg[0].buf = &regaddr;
 
 	msg[1].addr = client->addr;
-	msg[1].flags = write ? 0 : I2C_M_RD;
+	msg[1].flags = I2C_M_RD;
 	msg[1].len = count;
 	msg[1].buf = buf;
 
@@ -170,7 +169,6 @@ static int idtcm_xfer(struct idtcm *idtcm,
 			fmt,
 			__LINE__,
 			__func__,
-			write ? "write" : "read",
 			regaddr);
 		return cnt;
 	} else if (cnt != 2) {
@@ -182,6 +180,37 @@ static int idtcm_xfer(struct idtcm *idtcm,
 	return 0;
 }
 
+static int idtcm_xfer_write(struct idtcm *idtcm,
+			    u8 regaddr,
+			    u8 *buf,
+			    u16 count)
+{
+	struct i2c_client *client = idtcm->client;
+	/* we add 1 byte for device register */
+	u8 msg[IDTCM_MAX_WRITE_COUNT + 1];
+	int cnt;
+	char *fmt = "i2c_master_send failed at %d in %s, at addr: %04X!\n";
+
+	if (count > IDTCM_MAX_WRITE_COUNT)
+		return -EINVAL;
+
+	msg[0] = regaddr;
+	memcpy(&msg[1], buf, count);
+
+	cnt = i2c_master_send(client, msg, count + 1);
+
+	if (cnt < 0) {
+		dev_err(&client->dev,
+			fmt,
+			__LINE__,
+			__func__,
+			regaddr);
+		return cnt;
+	}
+
+	return 0;
+}
+
 static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
 {
 	u8 buf[4];
@@ -195,7 +224,7 @@ static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
 	buf[2] = 0x10;
 	buf[3] = 0x20;
 
-	err = idtcm_xfer(idtcm, PAGE_ADDR, buf, sizeof(buf), 1);
+	err = idtcm_xfer_write(idtcm, PAGE_ADDR, buf, sizeof(buf));
 
 	if (err) {
 		idtcm->page_offset = 0xff;
@@ -223,11 +252,12 @@ static int _idtcm_rdwr(struct idtcm *idtcm,
 
 	err = idtcm_page_offset(idtcm, hi);
 	if (err)
-		goto out;
+		return err;
 
-	err = idtcm_xfer(idtcm, lo, buf, count, write);
-out:
-	return err;
+	if (write)
+		return idtcm_xfer_write(idtcm, lo, buf, count);
+
+	return idtcm_xfer_read(idtcm, lo, buf, count);
 }
 
 static int idtcm_read(struct idtcm *idtcm,
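Splitting idtcm_xfer() lets the write path collapse into a single i2c_master_send() with the register address prepended to the payload in one buffer; the price is a bounds check so the memcpy() can never overrun the on-stack buffer. The essential shape of that helper (kernel-style sketch; the 512-byte limit mirrors IDTCM_MAX_WRITE_COUNT from the patch, the function name is made up):

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/string.h>

#define MAX_WRITE_COUNT	512

static int reg_write(struct i2c_client *client, u8 regaddr,
		     const u8 *buf, u16 count)
{
	u8 msg[MAX_WRITE_COUNT + 1];	/* +1 for the register address */
	int cnt;

	if (count > MAX_WRITE_COUNT)
		return -EINVAL;		/* never overrun the stack buffer */

	msg[0] = regaddr;
	memcpy(&msg[1], buf, count);

	cnt = i2c_master_send(client, msg, count + 1);
	if (cnt < 0)
		return cnt;
	return cnt == count + 1 ? 0 : -EIO;	/* short write is an error */
}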
write ? 0 : I2C_M_RD; + msg[1].flags = I2C_M_RD; msg[1].len = count; msg[1].buf = buf; @@ -170,7 +169,6 @@ static int idtcm_xfer(struct idtcm *idtcm, fmt, __LINE__, __func__, - write ? "write" : "read", regaddr); return cnt; } else if (cnt != 2) { @@ -182,6 +180,37 @@ static int idtcm_xfer(struct idtcm *idtcm, return 0; } +static int idtcm_xfer_write(struct idtcm *idtcm, + u8 regaddr, + u8 *buf, + u16 count) +{ + struct i2c_client *client = idtcm->client; + /* we add 1 byte for device register */ + u8 msg[IDTCM_MAX_WRITE_COUNT + 1]; + int cnt; + char *fmt = "i2c_master_send failed at %d in %s, at addr: %04X!\n"; + + if (count > IDTCM_MAX_WRITE_COUNT) + return -EINVAL; + + msg[0] = regaddr; + memcpy(&msg[1], buf, count); + + cnt = i2c_master_send(client, msg, count + 1); + + if (cnt < 0) { + dev_err(&client->dev, + fmt, + __LINE__, + __func__, + regaddr); + return cnt; + } + + return 0; +} + static int idtcm_page_offset(struct idtcm *idtcm, u8 val) { u8 buf[4]; @@ -195,7 +224,7 @@ static int idtcm_page_offset(struct idtcm *idtcm, u8 val) buf[2] = 0x10; buf[3] = 0x20; - err = idtcm_xfer(idtcm, PAGE_ADDR, buf, sizeof(buf), 1); + err = idtcm_xfer_write(idtcm, PAGE_ADDR, buf, sizeof(buf)); if (err) { idtcm->page_offset = 0xff; @@ -223,11 +252,12 @@ static int _idtcm_rdwr(struct idtcm *idtcm, err = idtcm_page_offset(idtcm, hi); if (err) - goto out; + return err; - err = idtcm_xfer(idtcm, lo, buf, count, write); -out: - return err; + if (write) + return idtcm_xfer_write(idtcm, lo, buf, count); + + return idtcm_xfer_read(idtcm, lo, buf, count); } static int idtcm_read(struct idtcm *idtcm, diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h index ffae56c5d97fab3b809c562226a9ff513b6a0b00..82840d72364adb89012999ae3b1e5b79096b5d0d 100644 --- a/drivers/ptp/ptp_clockmatrix.h +++ b/drivers/ptp/ptp_clockmatrix.h @@ -55,6 +55,8 @@ #define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef) +#define IDTCM_MAX_WRITE_COUNT (512) + /* Values of DPLL_N.DPLL_MODE.PLL_MODE */ enum pll_mode { PLL_MODE_MIN = 0, diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index e4c422d806bee0728d2e51fff25c5881e533d70f..b9f8514909bf016df3bced3d5e53b221355d72c5 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig @@ -37,7 +37,7 @@ config RAPIDIO_ENABLE_RX_TX_PORTS config RAPIDIO_DMA_ENGINE bool "DMA Engine support for RapidIO" depends on RAPIDIO - select DMADEVICES + depends on DMADEVICES select DMA_ENGINE help Say Y here if you want to use DMA Engine frameork for RapidIO data diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index c07ceec3c6d4971e6dbbc35e87326b55ff735a3d..a30342942e26f996cab50a7ace832f6886f7dd7b 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -2150,7 +2150,7 @@ static void mport_release_mapping(struct kref *ref) switch (map->dir) { case MAP_INBOUND: rio_unmap_inb_region(mport, map->phys_addr); - /* fall through */ + fallthrough; case MAP_DMA: dma_free_coherent(mport->dev.parent, map->size, map->virt_addr, map->phys_addr); diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index fbc95cadaf5393d4cb27dfe7bdeaf31895c75e2d..1bacb37e8a99262ebe6902ea1fbb5660788fffe8 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -399,7 +399,7 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) if (rate_count > 0) break; - /* fall through */ + fallthrough; default: /* Not supported for this 
regulator */ return -ENOTSUPP; @@ -1022,7 +1022,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) * (See include/linux/mfd/axp20x.h) */ reg = AXP803_DCDC_FREQ_CTRL; - /* Fall through - to the check below.*/ + fallthrough; /* to the check below */ case AXP806_ID: /* * AXP806 also have DCDC work frequency setting register at a @@ -1030,7 +1030,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) */ if (axp20x->variant == AXP806_ID) reg = AXP806_DCDC_FREQ_CTRL; - /* Fall through */ + fallthrough; case AXP221_ID: case AXP223_ID: case AXP809_ID: @@ -1118,7 +1118,7 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work * (See include/linux/mfd/axp20x.h) */ reg = AXP806_DCDC_MODE_CTRL2; - /* Fall through - to the check below. */ + fallthrough; /* to the check below */ case AXP221_ID: case AXP223_ID: case AXP809_ID: diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 75ff7c563c5d601c75c1af357ee8645a6be0cc6d..7ff507ec875a85a8d904f30f947191091b1ccbfb 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -236,8 +236,8 @@ static bool regulator_supply_is_couple(struct regulator_dev *rdev) static void regulator_unlock_recursive(struct regulator_dev *rdev, unsigned int n_coupled) { - struct regulator_dev *c_rdev; - int i; + struct regulator_dev *c_rdev, *supply_rdev; + int i, supply_n_coupled; for (i = n_coupled; i > 0; i--) { c_rdev = rdev->coupling_desc.coupled_rdevs[i - 1]; @@ -245,10 +245,13 @@ static void regulator_unlock_recursive(struct regulator_dev *rdev, if (!c_rdev) continue; - if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) - regulator_unlock_recursive( - c_rdev->supply->rdev, - c_rdev->coupling_desc.n_coupled); + if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) { + supply_rdev = c_rdev->supply->rdev; + supply_n_coupled = supply_rdev->coupling_desc.n_coupled; + + regulator_unlock_recursive(supply_rdev, + supply_n_coupled); + } regulator_unlock(c_rdev); } @@ -1461,7 +1464,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, const char *consumer_dev_name, const char *supply) { - struct regulator_map *node; + struct regulator_map *node, *new_node; int has_dev; if (supply == NULL) @@ -1472,6 +1475,22 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, else has_dev = 0; + new_node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); + if (new_node == NULL) + return -ENOMEM; + + new_node->regulator = rdev; + new_node->supply = supply; + + if (has_dev) { + new_node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); + if (new_node->dev_name == NULL) { + kfree(new_node); + return -ENOMEM; + } + } + + mutex_lock(&regulator_list_mutex); list_for_each_entry(node, &regulator_map_list, list) { if (node->dev_name && consumer_dev_name) { if (strcmp(node->dev_name, consumer_dev_name) != 0) @@ -1489,26 +1508,19 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, node->regulator->desc->name, supply, dev_name(&rdev->dev), rdev_get_name(rdev)); - return -EBUSY; + goto fail; } - node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); - if (node == NULL) - return -ENOMEM; - - node->regulator = rdev; - node->supply = supply; - - if (has_dev) { - node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); - if (node->dev_name == NULL) { - kfree(node); - return -ENOMEM; - } - } + list_add(&new_node->list, &regulator_map_list); + mutex_unlock(&regulator_list_mutex); - list_add(&node->list, &regulator_map_list); return 0; + +fail: 
mutex_unlock(&regulator_list_mutex); + kfree(new_node->dev_name); + kfree(new_node); + return -EBUSY; } static void unset_regulator_supplies(struct regulator_dev *rdev) @@ -1580,44 +1592,53 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, const char *supply_name) { struct regulator *regulator; - char buf[REG_STR_SIZE]; - int err, size; + int err; + + if (dev) { + char buf[REG_STR_SIZE]; + int size; + + size = snprintf(buf, REG_STR_SIZE, "%s-%s", + dev->kobj.name, supply_name); + if (size >= REG_STR_SIZE) + return NULL; + + supply_name = kstrdup(buf, GFP_KERNEL); + if (supply_name == NULL) + return NULL; + } else { + supply_name = kstrdup_const(supply_name, GFP_KERNEL); + if (supply_name == NULL) + return NULL; + } regulator = kzalloc(sizeof(*regulator), GFP_KERNEL); - if (regulator == NULL) + if (regulator == NULL) { + kfree(supply_name); return NULL; + } - regulator_lock(rdev); regulator->rdev = rdev; + regulator->supply_name = supply_name; + + regulator_lock(rdev); list_add(&regulator->list, &rdev->consumer_list); + regulator_unlock(rdev); if (dev) { regulator->dev = dev; /* Add a link to the device sysfs entry */ - size = snprintf(buf, REG_STR_SIZE, "%s-%s", - dev->kobj.name, supply_name); - if (size >= REG_STR_SIZE) - goto overflow_err; - - regulator->supply_name = kstrdup(buf, GFP_KERNEL); - if (regulator->supply_name == NULL) - goto overflow_err; - err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj, - buf); + supply_name); if (err) { rdev_dbg(rdev, "could not add device link %s err %d\n", dev->kobj.name, err); /* non-fatal */ } - } else { - regulator->supply_name = kstrdup_const(supply_name, GFP_KERNEL); - if (regulator->supply_name == NULL) - goto overflow_err; } - regulator->debugfs = debugfs_create_dir(regulator->supply_name, + regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs); if (!regulator->debugfs) { rdev_dbg(rdev, "Failed to create debugfs directory\n"); @@ -1642,13 +1663,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, _regulator_is_enabled(rdev)) regulator->always_on = true; - regulator_unlock(rdev); return regulator; -overflow_err: - list_del(&regulator->list); - kfree(regulator); - regulator_unlock(rdev); - return NULL; } static int _regulator_get_enable_time(struct regulator_dev *rdev) @@ -1895,7 +1910,7 @@ struct regulator *_regulator_get(struct device *dev, const char *id, case EXCLUSIVE_GET: dev_warn(dev, "dummy supplies not allowed for exclusive requests\n"); - /* fall through */ + fallthrough; default: return ERR_PTR(-ENODEV); @@ -2230,10 +2245,13 @@ EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias); static int regulator_ena_gpio_request(struct regulator_dev *rdev, const struct regulator_config *config) { - struct regulator_enable_gpio *pin; + struct regulator_enable_gpio *pin, *new_pin; struct gpio_desc *gpiod; gpiod = config->ena_gpiod; + new_pin = kzalloc(sizeof(*new_pin), GFP_KERNEL); + + mutex_lock(&regulator_list_mutex); list_for_each_entry(pin, &regulator_ena_gpio_list, list) { if (pin->gpiod == gpiod) { @@ -2242,9 +2260,13 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev, } } - pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL); - if (pin == NULL) + if (new_pin == NULL) { + mutex_unlock(&regulator_list_mutex); return -ENOMEM; + } + + pin = new_pin; + new_pin = NULL; pin->gpiod = gpiod; list_add(&pin->list, &regulator_ena_gpio_list); @@ -2252,6 +2274,10 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev, update_ena_gpio_to_rdev: 
pin->request_count++; rdev->ena_pin = pin; + + mutex_unlock(&regulator_list_mutex); + kfree(new_pin); + return 0; } @@ -2264,19 +2290,19 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev) /* Free the GPIO only in case of no use */ list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) { - if (pin->gpiod == rdev->ena_pin->gpiod) { - if (pin->request_count <= 1) { - pin->request_count = 0; - gpiod_put(pin->gpiod); - list_del(&pin->list); - kfree(pin); - rdev->ena_pin = NULL; - return; - } else { - pin->request_count--; - } - } + if (pin != rdev->ena_pin) + continue; + + if (--pin->request_count) + break; + + gpiod_put(pin->gpiod); + list_del(&pin->list); + kfree(pin); + break; } + + rdev->ena_pin = NULL; } /** @@ -4949,13 +4975,9 @@ static void regulator_resolve_coupling(struct regulator_dev *rdev) return; } - regulator_lock(c_rdev); - c_desc->coupled_rdevs[i] = c_rdev; c_desc->n_resolved++; - regulator_unlock(c_rdev); - regulator_resolve_coupling(c_rdev); } } @@ -5040,7 +5062,10 @@ static int regulator_init_coupling(struct regulator_dev *rdev) if (!of_check_coupling_data(rdev)) return -EPERM; + mutex_lock(&regulator_list_mutex); rdev->coupling_desc.coupler = regulator_find_coupler(rdev); + mutex_unlock(&regulator_list_mutex); + if (IS_ERR(rdev->coupling_desc.coupler)) { err = PTR_ERR(rdev->coupling_desc.coupler); rdev_err(rdev, "failed to get coupler: %d\n", err); @@ -5141,6 +5166,7 @@ regulator_register(const struct regulator_desc *regulator_desc, ret = -ENOMEM; goto rinse; } + device_initialize(&rdev->dev); /* * Duplicate the config so the driver could override it after @@ -5148,9 +5174,8 @@ regulator_register(const struct regulator_desc *regulator_desc, */ config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL); if (config == NULL) { - kfree(rdev); ret = -ENOMEM; - goto rinse; + goto clean; } init_data = regulator_of_get_init_data(dev, regulator_desc, config, @@ -5162,10 +5187,8 @@ regulator_register(const struct regulator_desc *regulator_desc, * from a gpio extender or something else. 
*/ if (PTR_ERR(init_data) == -EPROBE_DEFER) { - kfree(config); - kfree(rdev); ret = -EPROBE_DEFER; - goto rinse; + goto clean; } /* @@ -5206,9 +5229,7 @@ regulator_register(const struct regulator_desc *regulator_desc, } if (config->ena_gpiod) { - mutex_lock(&regulator_list_mutex); ret = regulator_ena_gpio_request(rdev, config); - mutex_unlock(&regulator_list_mutex); if (ret != 0) { rdev_err(rdev, "Failed to request enable GPIO: %d\n", ret); @@ -5220,7 +5241,6 @@ regulator_register(const struct regulator_desc *regulator_desc, } /* register with sysfs */ - device_initialize(&rdev->dev); rdev->dev.class = &regulator_class; rdev->dev.parent = dev; dev_set_name(&rdev->dev, "regulator.%lu", @@ -5248,27 +5268,22 @@ regulator_register(const struct regulator_desc *regulator_desc, if (ret < 0) goto wash; - mutex_lock(&regulator_list_mutex); ret = regulator_init_coupling(rdev); - mutex_unlock(&regulator_list_mutex); if (ret < 0) goto wash; /* add consumers devices */ if (init_data) { - mutex_lock(&regulator_list_mutex); for (i = 0; i < init_data->num_consumer_supplies; i++) { ret = set_consumer_device_supply(rdev, init_data->consumer_supplies[i].dev_name, init_data->consumer_supplies[i].supply); if (ret < 0) { - mutex_unlock(&regulator_list_mutex); dev_err(dev, "Failed to set supply %s\n", init_data->consumer_supplies[i].supply); goto unset_supplies; } } - mutex_unlock(&regulator_list_mutex); } if (!rdev->desc->ops->get_voltage && @@ -5303,13 +5318,11 @@ regulator_register(const struct regulator_desc *regulator_desc, mutex_lock(&regulator_list_mutex); regulator_ena_gpio_free(rdev); mutex_unlock(&regulator_list_mutex); - put_device(&rdev->dev); - rdev = NULL; clean: if (dangling_of_gpiod) gpiod_put(config->ena_gpiod); - kfree(rdev); kfree(config); + put_device(&rdev->dev); rinse: if (dangling_cfg_gpiod) gpiod_put(cfg->ena_gpiod); diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c index 3117bbd2826b2d1e42db29853d2541f9e5a0ad92..eb3fc1db4edc8118b0be4133e559e10e1f7a233d 100644 --- a/drivers/regulator/cros-ec-regulator.c +++ b/drivers/regulator/cros-ec-regulator.c @@ -170,6 +170,9 @@ static int cros_ec_regulator_init_info(struct device *dev, data->voltages_mV = devm_kmemdup(dev, resp.voltages_mv, sizeof(u16) * data->num_voltages, GFP_KERNEL); + if (!data->voltages_mV) + return -ENOMEM; + data->desc.n_voltages = data->num_voltages; /* Make sure the returned name is always a valid string */ diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index d54830e48b8dc5671d921b919a7f24db71c8fe38..142a70a8915369423b01d42602d31ddd1aea8b7f 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c @@ -182,7 +182,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) drvdata->enable_clock = devm_clk_get(dev, NULL); if (IS_ERR(drvdata->enable_clock)) { - dev_err(dev, "Cant get enable-clock from devicetree\n"); + dev_err(dev, "Can't get enable-clock from devicetree\n"); return -ENOENT; } } else { diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c index 3234b118b53ea275f94aa2831c88e2835a7b6a82..990bd50771d8df73885e256e8cb46d3aa6206974 100644 --- a/drivers/regulator/pwm-regulator.c +++ b/drivers/regulator/pwm-regulator.c @@ -279,7 +279,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev, return ret; } - drvdata->state = -EINVAL; + drvdata->state = -ENOTRECOVERABLE; drvdata->duty_cycle_table = duty_cycle_table; drvdata->desc.ops = &pwm_regulator_voltage_table_ops; drvdata->desc.n_voltages = length / 
sizeof(*duty_cycle_table); diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c index 44e4cecbf6dec0600cc2501487a74a3b2bc9fac2..87b020d0b958dd9869a9ec6024f1892fbe651ce9 100644 --- a/drivers/regulator/slg51000-regulator.c +++ b/drivers/regulator/slg51000-regulator.c @@ -319,7 +319,7 @@ static int slg51000_regulator_init(struct slg51000 *chip) rdesc->linear_min_sel = 0; break; } - /* Fall through - to the check below.*/ + fallthrough; /* to the check below */ default: rdesc->linear_min_sel = vsel_range[0]; diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c index f7db250a75839ece685010fae72817613dadaddb..430265c404d65846733898e2f118739ca46ecdb7 100644 --- a/drivers/regulator/twl6030-regulator.c +++ b/drivers/regulator/twl6030-regulator.c @@ -312,7 +312,7 @@ static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index) switch (info->flags) { case SMPS_OFFSET_EN: voltage = 100000; - /* fall through */ + fallthrough; case 0: switch (index) { case 0: diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c index 6955fab0a78b787fa5e908fc1235579a1b96796d..d94b7391bf9ddf24167ca22001c2fe0a242d368d 100644 --- a/drivers/remoteproc/omap_remoteproc.c +++ b/drivers/remoteproc/omap_remoteproc.c @@ -511,7 +511,6 @@ static void omap_rproc_mbox_callback(struct mbox_client *client, void *data) dev_info(dev, "received echo reply from %s\n", name); break; case RP_MBOX_SUSPEND_ACK: - /* Fall through */ case RP_MBOX_SUSPEND_CANCEL: oproc->suspend_acked = msg == RP_MBOX_SUSPEND_ACK; complete(&oproc->pm_comp); diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c index d170fe66321060356270ac330dacf9e55975b648..e8aa8691deb2dbc7bbd62153a5b82943a3c253cb 100644 --- a/drivers/reset/reset-imx7.c +++ b/drivers/reset/reset-imx7.c @@ -222,7 +222,7 @@ static int imx8mq_reset_set(struct reset_controller_dev *rcdev, switch (id) { case IMX8MQ_RESET_PCIEPHY: - case IMX8MQ_RESET_PCIEPHY2: /* fallthrough */ + case IMX8MQ_RESET_PCIEPHY2: /* * wait for more than 10us to release phy g_rst and * btnrst @@ -232,12 +232,12 @@ static int imx8mq_reset_set(struct reset_controller_dev *rcdev, break; case IMX8MQ_RESET_PCIE_CTRL_APPS_EN: - case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: /* fallthrough */ - case IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N: /* fallthrough */ - case IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N: /* fallthrough */ - case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N: /* fallthrough */ - case IMX8MQ_RESET_MIPI_DSI_RESET_N: /* fallthrough */ - case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: /* fallthrough */ + case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: + case IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N: + case IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N: + case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N: + case IMX8MQ_RESET_MIPI_DSI_RESET_N: + case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: value = assert ? 
0 : bit; break; } diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 1995f5b3ea6775e22f85dc97461a107fc5b4c561..f40312b16da06528fc11b9ee18589bd7d6728378 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -553,7 +553,7 @@ static void qcom_glink_receive_version(struct qcom_glink *glink, break; case GLINK_VERSION_1: glink->features &= features; - /* FALLTHROUGH */ + fallthrough; default: qcom_glink_send_version_ack(glink); break; @@ -584,7 +584,7 @@ static void qcom_glink_receive_version_ack(struct qcom_glink *glink, break; glink->features &= features; - /* FALLTHROUGH */ + fallthrough; default: qcom_glink_send_version(glink); break; diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 9b70b371bd0c379ef9564d5db0e193973062e975..8a89bc52b0d45122e968e29941ce85c3a16a2384 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -740,7 +740,7 @@ static int wdt_ioctl(struct file *file, unsigned int cmd, return -EINVAL; wdt_margin = new_margin; wdt_ping(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(wdt_margin, (int __user *)arg); diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index ca55ba975aeb19c518fde0b3000b1b123af3c6b4..f8b99cb72959040f5460fd8a567307eff6adaf65 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -353,7 +353,7 @@ static int pcf85063_load_capacitance(struct pcf85063 *pcf85063, default: dev_warn(&pcf85063->rtc->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 7000", load); - /* fall through */ + fallthrough; case 7000: break; case 12500: diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c index 47e0f411dd5cb744f84f0fa2269a882b9038e08c..57d351dfe2723306bbf9a42b05e6f3ec64348c03 100644 --- a/drivers/rtc/rtc-pcf8523.c +++ b/drivers/rtc/rtc-pcf8523.c @@ -108,7 +108,7 @@ static int pcf8523_load_capacitance(struct i2c_client *client) default: dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500", load); - /* fall through */ + fallthrough; case 12500: value |= REG_CONTROL1_CAP_SEL; break; diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c index c9bc3d4a1e66c40a8897244b7063998c351d2d44..0a969af80af7c0ae96485df94b11190d650e12b8 100644 --- a/drivers/rtc/rtc-stmp3xxx.c +++ b/drivers/rtc/rtc-stmp3xxx.c @@ -331,7 +331,7 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev) default: dev_warn(&pdev->dev, "invalid crystal-freq specified in device-tree. Assuming no crystal\n"); - /* fall-through */ + fallthrough; case 0: /* keep XTAL on in low-power mode */ pers0_set = STMP3XXX_RTC_PERSISTENT0_XTAL24MHZ_PWRUP; diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 94edbb33d0d1fa7db7c93c8fe6702b7c30eebb8e..aca022239b333e6eabe0904ef4e7b6e77c00a84d 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -677,6 +677,11 @@ static int slow_eval_known_fn(struct subchannel *sch, void *data) rc = css_evaluate_known_subchannel(sch, 1); if (rc == -EAGAIN) css_schedule_eval(sch->schid); + /* + * The loop might take long time for platforms with lots of + * known devices. Allow scheduling here. 
+ */ + cond_resched(); } return 0; } diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 3ce99e4db44d5fd573a9500f991d1947a3fe1551..661d2a49bce96a00f61d69230bd65105e6870547 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -1695,7 +1695,7 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg) grp->changed_side = 2; break; } - /* Else, fall through */ + fallthrough; case MPCG_STATE_XID0IOWAIX: case MPCG_STATE_XID7INITW: case MPCG_STATE_XID7INITX: diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index ab316baa828433ed71390aa96c04f8600c641101..85a1a4533cbebc32fb80638edb29d5461842025d 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -357,7 +357,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) /*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/ if (callback) grp->send_qllc_disc = 1; - /* Else, fall through */ + fallthrough; case MPCG_STATE_XID0IOWAIT: fsm_deltimer(&grp->timer); grp->outstanding_xid2 = 0; @@ -1470,7 +1470,7 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg) if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) && (fsm_getstate(wch->fsm) == CH_XID0_PENDING)) break; - /* Else, fall through */ + fallthrough; default: fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); } @@ -2089,7 +2089,7 @@ static int mpc_send_qllc_discontact(struct net_device *dev) grp->estconnfunc = NULL; break; } - /* Else, fall through */ + fallthrough; case MPCG_STATE_FLOWC: case MPCG_STATE_READY: grp->send_qllc_disc = 2; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index bba1b54b8aa390ee910b96418eab8b4747bf84ea..6a739825142370348e8e1ce91d9b00e23f90744c 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1071,7 +1071,7 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, break; case -EIO: qeth_schedule_recovery(card); - /* fall through */ + fallthrough; default: qeth_clear_ipacmd_list(card); goto err_idx; @@ -2886,7 +2886,7 @@ void qeth_print_status_message(struct qeth_card *card) card->info.mcl_level[3]); break; } - /* fallthrough */ + fallthrough; case QETH_CARD_TYPE_IQD: if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { card->info.mcl_level[0] = (char) _ebcasc[(__u8) diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c index ebdc03210608957ad3dcca451f9121e4e13ae704..f870c5322bfe7431fc3cf063aaa293acb1894837 100644 --- a/drivers/s390/net/qeth_ethtool.c +++ b/drivers/s390/net/qeth_ethtool.c @@ -356,7 +356,7 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, 10000baseT_Full); ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); - /* fall through */ + fallthrough; case SPEED_1000: ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); @@ -366,7 +366,7 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, 1000baseT_Half); ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Half); - /* fall through */ + fallthrough; case SPEED_100: ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); @@ -376,7 +376,7 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, 100baseT_Half); ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); - /* fall through */ + fallthrough; case SPEED_10: ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); diff --git a/drivers/s390/net/qeth_l2_main.c 
b/drivers/s390/net/qeth_l2_main.c index 8b342a88ff5cb9c8ab8d0c5e49ee655632fa78f9..3a94f6cad1676f6db27c433f97ce6c16478debb4 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -488,7 +488,7 @@ static void qeth_l2_rx_mode_work(struct work_struct *work) kfree(mac); break; } - /* fall through */ + fallthrough; default: /* for next call to set_rx_mode(): */ mac->disp_flag = QETH_DISP_ADDR_DELETE; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index fe44b0249e34ad3d8eb8ee0940e132617547974e..4d461960370d95ea93134d1e11d0edb40307db9f 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1235,7 +1235,7 @@ static void qeth_l3_rx_mode_work(struct work_struct *work) break; } addr->ref_counter = 1; - /* fall through */ + fallthrough; default: /* for next call to set_rx_mode(): */ addr->disp_flag = QETH_DISP_ADDR_DELETE; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index c795f22249d8ff03665494e1f9e55aa5166fb6bb..140186fe1d1e0b418246fd4478662fb0740706f9 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -434,7 +434,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) return; } - del_timer(&req->timer); + del_timer_sync(&req->timer); zfcp_fsf_protstatus_eval(req); zfcp_fsf_fsfstatus_eval(req); req->handler(req); @@ -867,7 +867,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); req->issued = get_tod_clock(); if (zfcp_qdio_send(qdio, &req->qdio_req)) { - del_timer(&req->timer); + del_timer_sync(&req->timer); /* lookup request again, list might have changed */ zfcp_reqlist_find_rm(adapter->req_list, req_id); zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1"); diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 461b3babb601ef4894da677acb196745c2a4c0b2..84b57a8f86bfa942fd78fa8f17c27ff9302d9df2 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1832,7 +1832,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) case REQUEST_SENSE: /* clear the internal sense magic */ SCp->cmnd[6] = 0; - /* fall through */ + fallthrough; default: /* OK, get it from the command */ switch(SCp->sc_data_direction) { diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index bb49d83cadc787c52d10c1f899cefc5531626551..ccb061ab0a0ad2bf11819a98cfc5040831381b2a 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -2635,7 +2635,7 @@ static int blogic_resultcode(struct blogic_adapter *adapter, case BLOGIC_BAD_CMD_PARAM: blogic_warn("BusLogic Driver Protocol Error 0x%02X\n", adapter, adapter_status); - /* fall through */ + fallthrough; case BLOGIC_DATA_UNDERRUN: case BLOGIC_DATA_OVERRUN: case BLOGIC_NOEXPECT_BUSFREE: diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c index 0f17bd51088ac7890c94cda8b887b6a7aae3e5e6..24ace18240480a212137e277b3bfa864d90cb6b3 100644 --- a/drivers/scsi/FlashPoint.c +++ b/drivers/scsi/FlashPoint.c @@ -1034,11 +1034,14 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) temp6 >>= 1; switch (temp & 0x3) { case AUTO_RATE_20: /* Synchronous, 20 mega-transfers/second */ - temp6 |= 0x8000; /* Fall through */ + temp6 |= 0x8000; + fallthrough; case AUTO_RATE_10: /* Synchronous, 10 mega-transfers/second */ - temp5 |= 0x8000; /* Fall through */ + temp5 |= 0x8000; + fallthrough; case AUTO_RATE_05: /* Synchronous, 5 mega-transfers/second */ - temp2 |= 0x8000; /* Fall 
through */ + temp2 |= 0x8000; + fallthrough; case AUTO_RATE_00: /* Asynchronous */ break; } diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index f2f7e6e76c07ac10b4bbf8f99a5d74c1315d960d..d654a6cc4162f8ff7a8dcf9675886f816b09bc5b 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -1943,7 +1943,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) return; /* Reject message */ - /* Fall through */ + fallthrough; default: /* * If we get something weird that we aren't expecting, diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 769af4ca9ca97e675c48e6a15750132b1969cf56..fd6ae5c380861c0917a5fd78ae3f0806a021138a 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -2809,7 +2809,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) !(dev->raw_io_64) || ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) break; - /* fall through */ + fallthrough; case INQUIRY: case READ_CAPACITY: case TEST_UNIT_READY: @@ -2884,7 +2884,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) /* Issue FIB to tell Firmware to flush it's cache */ if ((aac_cache & 6) != 2) return aac_synchronize(scsicmd); - /* fall through */ + fallthrough; case INQUIRY: { struct inquiry_data inq_data; @@ -3240,7 +3240,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) SCSI_SENSE_BUFFERSIZE)); break; } - /* fall through */ + fallthrough; case RESERVE: case RELEASE: case REZERO_UNIT: @@ -3253,7 +3253,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) case START_STOP: return aac_start_stop(scsicmd); - /* FALLTHRU */ + fallthrough; default: /* * Unhandled commands diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index adbdc3b7c7a706e0151c698f84c73251157a05e8..383e74fea6edf13fde747411b09db04b4e44e140 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1431,7 +1431,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) "enclosure services event"); scsi_device_set_state(device, SDEV_RUNNING); } - /* FALLTHRU */ + fallthrough; case CHANGE: if ((channel == CONTAINER_CHANNEL) && (!dev->fsa_dev[container].valid)) { diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 8588da0a065551a6db065257c87a61f4ef9360a7..a3aee146537bc839db65f503e71b775f67dc2ba0 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -765,7 +765,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd) !(aac->raw_io_64) || ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) break; - /* fall through */ + fallthrough; case INQUIRY: case READ_CAPACITY: /* diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index c912d29b8bdf7bc302753792a1d859d611649391..1c617c0d5899f302900c8c443f84ecaea4b3a347 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c @@ -2274,7 +2274,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) switch (scb->hscb->task_management) { case SIU_TASKMGMT_ABORT_TASK: tag = SCB_GET_TAG(scb); - /* fall through */ + fallthrough; case SIU_TASKMGMT_ABORT_TASK_SET: case SIU_TASKMGMT_CLEAR_TASK_SET: lun = scb->hscb->lun; @@ -2285,7 +2285,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) break; case SIU_TASKMGMT_LUN_RESET: lun = scb->hscb->lun; - /* fall through */ + fallthrough; case SIU_TASKMGMT_TARGET_RESET: { struct ahd_devinfo devinfo; @@ -3791,7 +3791,7 @@ ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, *bus_width = 
MSG_EXT_WDTR_BUS_16_BIT; break; } - /* FALLTHROUGH */ + fallthrough; case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; @@ -5104,7 +5104,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) break; case MSG_MESSAGE_REJECT: response = ahd_handle_msg_reject(ahd, devinfo); - /* FALLTHROUGH */ + fallthrough; case MSG_NOOP: done = MSGLOOP_MSGCOMPLETE; break; @@ -5454,7 +5454,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) ahd_name(ahd), ahd_inb(ahd, SCSISIGI)); #endif ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE; - /* FALLTHROUGH */ + fallthrough; case MSG_TERM_IO_PROC: default: reject = TRUE; @@ -6117,17 +6117,17 @@ ahd_free(struct ahd_softc *ahd) default: case 5: ahd_shutdown(ahd); - /* FALLTHROUGH */ + fallthrough; case 4: ahd_dmamap_unload(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap); - /* FALLTHROUGH */ + fallthrough; case 3: ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo, ahd->shared_data_map.dmamap); ahd_dmamap_destroy(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap); - /* FALLTHROUGH */ + fallthrough; case 2: ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat); case 1: @@ -6513,7 +6513,7 @@ ahd_fini_scbdata(struct ahd_softc *ahd) } ahd_dma_tag_destroy(ahd, scb_data->sense_dmat); } - /* fall through */ + fallthrough; case 6: { struct map_node *sg_map; @@ -6528,7 +6528,7 @@ ahd_fini_scbdata(struct ahd_softc *ahd) } ahd_dma_tag_destroy(ahd, scb_data->sg_dmat); } - /* fall through */ + fallthrough; case 5: { struct map_node *hscb_map; @@ -7171,7 +7171,7 @@ ahd_init(struct ahd_softc *ahd) case FLX_CSTAT_OVER: case FLX_CSTAT_UNDER: warn_user++; - /* fall through */ + fallthrough; case FLX_CSTAT_INVALID: case FLX_CSTAT_OKAY: if (warn_user == 0 && bootverbose == 0) @@ -8175,12 +8175,12 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in qinfifo\n"); ahd_done_with_status(ahd, scb, status); - /* FALLTHROUGH */ + fallthrough; case SEARCH_REMOVE: break; case SEARCH_PRINT: printk(" 0x%x", ahd->qinfifo[qinpos]); - /* FALLTHROUGH */ + fallthrough; case SEARCH_COUNT: ahd_qinfifo_requeue(ahd, prev_scb, scb); prev_scb = scb; @@ -8271,7 +8271,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, if ((mk_msg_scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB pending MK_MSG\n"); ahd_done_with_status(ahd, mk_msg_scb, status); - /* FALLTHROUGH */ + fallthrough; case SEARCH_REMOVE: { u_int tail_offset; @@ -8295,7 +8295,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel, } case SEARCH_PRINT: printk(" 0x%x", SCB_GET_TAG(scb)); - /* FALLTHROUGH */ + fallthrough; case SEARCH_COUNT: break; } @@ -8376,7 +8376,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in Waiting List\n"); ahd_done_with_status(ahd, scb, status); - /* fall through */ + fallthrough; case SEARCH_REMOVE: ahd_rem_wscb(ahd, scbid, prev, next, tid); *list_tail = prev; @@ -8385,7 +8385,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, break; case SEARCH_PRINT: printk("0x%x ", scbid); - /* fall through */ + fallthrough; case SEARCH_COUNT: prev = scbid; break; @@ -9023,7 +9023,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb) case SCSI_STATUS_OK: printk("%s: Interrupted for status of 0???\n", ahd_name(ahd)); - /* FALLTHROUGH */ + fallthrough; default: ahd_done(ahd, scb); break; @@ -9512,7 +9512,7 @@ 
ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) fmt3_ins = &instr.format3; fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address); } - /* fall through */ + fallthrough; case AIC_OP_OR: case AIC_OP_AND: case AIC_OP_XOR: @@ -9523,7 +9523,7 @@ ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; } fmt1_ins->parity = 0; - /* fall through */ + fallthrough; case AIC_OP_ROL: { int i, count; diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index d019e3f2bb9b3c038dedb981ead9fca0cb167d40..7c321303969eb0b15e715363782662b3bebe9078 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -2035,7 +2035,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd) break; case CAM_AUTOSENSE_FAIL: new_status = DID_ERROR; - /* Fallthrough */ + fallthrough; case CAM_SCSI_STATUS_ERROR: scsi_status = ahd_cmd_get_scsi_status(cmd); diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c index 3d4df906fa4f4e4d470b40f6b81d5f768640fa2d..2231c4afa531bbcb213eca31361f4f528210fb73 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_core.c +++ b/drivers/scsi/aic7xxx/aic7xxx_core.c @@ -2404,7 +2404,7 @@ ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo, *bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } - /* FALLTHROUGH */ + fallthrough; case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; @@ -3599,7 +3599,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) break; case MSG_MESSAGE_REJECT: response = ahc_handle_msg_reject(ahc, devinfo); - /* FALLTHROUGH */ + fallthrough; case MSG_NOOP: done = MSGLOOP_MSGCOMPLETE; break; @@ -4465,17 +4465,17 @@ ahc_free(struct ahc_softc *ahc) default: case 5: ahc_shutdown(ahc); - /* FALLTHROUGH */ + fallthrough; case 4: ahc_dmamap_unload(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); - /* FALLTHROUGH */ + fallthrough; case 3: ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, ahc->shared_data_dmamap); ahc_dmamap_destroy(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap); - /* FALLTHROUGH */ + fallthrough; case 2: ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat); case 1: @@ -4893,30 +4893,30 @@ ahc_fini_scbdata(struct ahc_softc *ahc) } ahc_dma_tag_destroy(ahc, scb_data->sg_dmat); } - /* fall through */ + fallthrough; case 6: ahc_dmamap_unload(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); - /* fall through */ + fallthrough; case 5: ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, scb_data->sense_dmamap); ahc_dmamap_destroy(ahc, scb_data->sense_dmat, scb_data->sense_dmamap); - /* fall through */ + fallthrough; case 4: ahc_dma_tag_destroy(ahc, scb_data->sense_dmat); - /* fall through */ + fallthrough; case 3: ahc_dmamap_unload(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); - /* fall through */ + fallthrough; case 2: ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs, scb_data->hscb_dmamap); ahc_dmamap_destroy(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap); - /* fall through */ + fallthrough; case 1: ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat); break; @@ -5981,7 +5981,7 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel, printk("Inactive SCB in Waiting List\n"); ahc_done(ahc, scb); } - /* fall through */ + fallthrough; case SEARCH_REMOVE: next = ahc_rem_wscb(ahc, next, prev); break; @@ -6987,7 +6987,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int 
instrptr, uint8_t *dconsts) address -= address_offset; fmt3_ins->address = address; } - /* fall through */ + fallthrough; case AIC_OP_OR: case AIC_OP_AND: case AIC_OP_XOR: @@ -7013,7 +7013,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts) fmt1_ins->opcode = AIC_OP_AND; fmt1_ins->immediate = 0xff; } - /* fall through */ + fallthrough; case AIC_OP_ROL: if ((ahc->features & AHC_ULTRA2) != 0) { int i, count; diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c index c264b4b56970b1a8050a5def47c37a2d77364aa3..e2d880a5f391535cc9fb37f1bac604ac6280c401 100644 --- a/drivers/scsi/aic94xx/aic94xx_scb.c +++ b/drivers/scsi/aic94xx/aic94xx_scb.c @@ -706,11 +706,11 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd) switch (pd->max_sas_lrate) { case SAS_LINK_RATE_6_0_GBPS: *speed_mask &= ~SAS_SPEED_60_DIS; - /* fall through*/ + fallthrough; default: case SAS_LINK_RATE_3_0_GBPS: *speed_mask &= ~SAS_SPEED_30_DIS; - /* fall through*/ + fallthrough; case SAS_LINK_RATE_1_5_GBPS: *speed_mask &= ~SAS_SPEED_15_DIS; } @@ -718,7 +718,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd) switch (pd->min_sas_lrate) { case SAS_LINK_RATE_6_0_GBPS: *speed_mask |= SAS_SPEED_30_DIS; - /* fall through*/ + fallthrough; case SAS_LINK_RATE_3_0_GBPS: *speed_mask |= SAS_SPEED_15_DIS; default: @@ -730,7 +730,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd) switch (pd->max_sata_lrate) { case SAS_LINK_RATE_3_0_GBPS: *speed_mask &= ~SATA_SPEED_30_DIS; - /* fall through*/ + fallthrough; default: case SAS_LINK_RATE_1_5_GBPS: *speed_mask &= ~SATA_SPEED_15_DIS; @@ -789,7 +789,7 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc) /* link reset retries, this should be nominal */ control_phy->link_reset_retries = 10; - /* fall through */ + fallthrough; case RELEASE_SPINUP_HOLD: /* 0x02 */ /* decide the func_mask */ diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index 1fcee65193a33e0245c10599681bc75a1ef59ded..0eb6e206a2b48f2ddf0c1169963fcc9c9f085cb2 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -490,7 +490,7 @@ int asd_abort_task(struct sas_task *task) switch (tcs.dl_opcode) { default: res = asd_clear_nexus(task); - /* fallthrough */ + fallthrough; case TC_NO_ERROR: break; /* The task hasn't been sent to the device xor diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index fa562a085600ded315af1e20442e2372ff4d31e6..ec895d0319f02ff45e5c0387c4c3338438ef92d3 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -4470,7 +4470,7 @@ static const char *arcmsr_info(struct Scsi_Host *host) case PCI_DEVICE_ID_ARECA_1202: case PCI_DEVICE_ID_ARECA_1210: raid6 = 0; - /*FALLTHRU*/ + fallthrough; case PCI_DEVICE_ID_ARECA_1120: case PCI_DEVICE_ID_ARECA_1130: case PCI_DEVICE_ID_ARECA_1160: diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 6c68c23036383406d65f328ba4bfeac88aa2521b..2e687ce6075383a8338c0acd5ed908276e072539 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -603,7 +603,7 @@ static void fas216_handlesync(FAS216_Info *info, char *msg) msgqueue_flush(&info->scsi.msgs); msgqueue_addmsg(&info->scsi.msgs, 1, MESSAGE_REJECT); info->scsi.phase = PHASE_MSGOUT_EXPECT; - /* fall through */ + fallthrough; case async: dev->period = info->ifcfg.asyncperiod / 4; @@ -916,7 +916,7 @@ static void 
fas216_disconnect_intr(FAS216_Info *info) fas216_done(info, DID_ABORT); break; } - /* else, fall through */ + fallthrough; default: /* huh? */ printk(KERN_ERR "scsi%d.%c: unexpected disconnect in phase %s\n", @@ -1413,7 +1413,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne case STATE(STAT_STATUS, PHASE_DATAOUT): /* Data Out -> Status */ case STATE(STAT_STATUS, PHASE_DATAIN): /* Data In -> Status */ fas216_stoptransfer(info); - /* fall through */ + fallthrough; case STATE(STAT_STATUS, PHASE_SELSTEPS):/* Sel w/ steps -> Status */ case STATE(STAT_STATUS, PHASE_MSGOUT): /* Message Out -> Status */ @@ -1426,7 +1426,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne case STATE(STAT_MESGIN, PHASE_DATAOUT): /* Data Out -> Message In */ case STATE(STAT_MESGIN, PHASE_DATAIN): /* Data In -> Message In */ fas216_stoptransfer(info); - /* fall through */ + fallthrough; case STATE(STAT_MESGIN, PHASE_COMMAND): /* Command -> Message In */ case STATE(STAT_MESGIN, PHASE_SELSTEPS):/* Sel w/ steps -> Message In */ @@ -1581,7 +1581,7 @@ static void fas216_funcdone_intr(FAS216_Info *info, unsigned int stat, unsigned fas216_message(info); break; } - /* else, fall through */ + fallthrough; default: fas216_log(info, 0, "internal phase %s for function done?" @@ -1964,7 +1964,7 @@ static void fas216_kick(FAS216_Info *info) switch (where_from) { case TYPE_QUEUE: fas216_allocate_tag(info, SCpnt); - /* fall through */ + fallthrough; case TYPE_OTHER: fas216_start_command(info, SCpnt); break; diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 93da6344424dd7e53d7a03a5d4736828be9c3f53..a13c203ef7a9a88260cfb430ab4f537498f17963 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -677,7 +677,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn, case ISCSI_PARAM_MAX_XMIT_DLENGTH: if (conn->max_xmit_dlength > 65536) conn->max_xmit_dlength = 65536; - /* fall through */ + fallthrough; default: return 0; } diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 8dc2e0824ad78261ed450602f6a0e84766b9fad6..5c3513a4b450ed417ce17007f677e4aca518b31b 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -1532,7 +1532,7 @@ beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn, break; case UNSOL_DATA_DIGEST_ERROR_NOTIFY: error = 1; - /* fall through */ + fallthrough; case UNSOL_DATA_NOTIFY: pasync_handle = pasync_ctx->async_entry[ci].data; break; diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 29f99561dfc3ad39d55075b84377d2e1ebf208e0..38d1c453074dc1d91198d1aed575b18f40c1afbe 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -2572,7 +2572,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) case FCP_IODIR_RW: bfa_stats(itnim, input_reqs); bfa_stats(itnim, output_reqs); - /* fall through */ + fallthrough; default: bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa)); } @@ -2807,7 +2807,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) case BFI_IOIM_STS_TIMEDOUT: bfa_stats(ioim->itnim, iocomp_timedout); - /* fall through */ + fallthrough; case BFI_IOIM_STS_ABORTED: rsp->io_status = BFI_IOIM_STS_ABORTED; bfa_stats(ioim->itnim, iocomp_aborted); @@ -3203,7 +3203,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, switch (event) { case BFA_TSKIM_SM_DONE: bfa_reqq_wcancel(&tskim->reqq_wait); - /* fall through */ + fallthrough; case BFA_TSKIM_SM_QRESUME: 
bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); bfa_tskim_send_abort(tskim); diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 297a77f5806ccf46bb2fe96805966b46a26feb62..3486e402bfc1f3a2d410c85f03d27ee755f5922d 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -6422,7 +6422,7 @@ bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport, switch (event) { case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); - /* fall through */ + fallthrough; case BFA_FCS_VPORT_SM_RSP_OK: case BFA_FCS_VPORT_SM_RSP_ERROR: @@ -6448,7 +6448,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, switch (event) { case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); - /* fall through */ + fallthrough; case BFA_FCS_VPORT_SM_RSP_OK: case BFA_FCS_VPORT_SM_RSP_ERROR: diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index 143c35bd668c2626157dae76add4e0d92f9a1b67..c21aa37b8adbe483c22f56eee3589d3b9cbc7ea9 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -419,13 +419,13 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); - /* fall through */ + fallthrough; case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); bfa_fcxp_discard(rport->fcxp); - /* fall through */ + fallthrough; case RPSM_EVENT_FAILED: if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) { rport->plogi_retries++; @@ -856,7 +856,7 @@ bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, * At least go offline when a PLOGI is received. */ bfa_fcxp_discard(rport->fcxp); - /* fall through */ + fallthrough; case RPSM_EVENT_FAILED: case RPSM_EVENT_ADDRESS_CHANGE: @@ -1042,7 +1042,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); - /* fall through */ + fallthrough; case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); @@ -1131,7 +1131,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_send_plogiacc(rport, NULL); break; } - /* fall through */ + fallthrough; case RPSM_EVENT_ADDRESS_CHANGE: if (!bfa_fcs_lport_is_online(rport->port)) { @@ -1288,7 +1288,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); - /* fall through */ + fallthrough; case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); @@ -1332,7 +1332,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); - /* fall through */ + fallthrough; case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index dd5821dfcac2d68585d3a4276bfdada0b943228e..325ad8a592bbc7bb26e60dcf5eda25b0d8d95524 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -969,7 +969,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) case IOCPF_E_INITFAIL: bfa_iocpf_timer_stop(ioc); - /* fall through */ + fallthrough; case IOCPF_E_TIMEOUT: writel(1, ioc->ioc_regs.ioc_sem_reg); @@ -1045,7 +1045,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) case IOCPF_E_FAIL: bfa_iocpf_timer_stop(ioc); - /* fall 
through */ + fallthrough; case IOCPF_E_TIMEOUT: bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); @@ -5988,7 +5988,7 @@ bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf, case BFA_DCONF_SM_IOCDISABLE: case BFA_DCONF_SM_FLASH_COMP: bfa_timer_stop(&dconf->timer); - /* fall through */ + fallthrough; case BFA_DCONF_SM_TIMEOUT: bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 1e266c1ef7938f01a8c449845062f578294e645c..11c0c3e6f014ff6401803dd6f4c77305120b153e 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -6397,7 +6397,7 @@ bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) dport->test_state = BFA_DPORT_ST_INP; bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU); } - /* fall thru */ + fallthrough; case BFA_DPORT_SM_REQFAIL: bfa_sm_set_state(dport, bfa_dport_sm_enabled); diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index e72d7bb7f4f42f3e6cb465d28fbc6424cc616985..08992095ce7ae2a5bfb54d1e151d4634706ff3ed 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -1404,7 +1404,6 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], break; case FCOE_KCQE_OPCODE_FCOE_ERROR: - /* fall thru */ default: printk(KERN_ERR PFX "unknown opcode 0x%x\n", kcqe->op_code); diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 98d4d39aaa572473896ec17c895a7f4254c1a6bc..7fa20609d5e7f51f89155bbdfd2a95af7a9b8f76 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -2939,7 +2939,7 @@ csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt) case CSIO_HWE_FW_DLOAD: csio_set_state(&hw->sm, csio_hws_resetting); /* Download firmware */ - /* Fall through */ + fallthrough; case CSIO_HWE_HBA_RESET: csio_set_state(&hw->sm, csio_hws_resetting); diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index 61cf54208451a591830c6448ba821f8ec0afe548..dc98f51f466fb3e0d3bc51034beaad0e05d5f348 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -1187,7 +1187,6 @@ csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt) break; case CSIO_LNE_LINK_DOWN: - /* Fall through */ case CSIO_LNE_DOWN_LINK: csio_set_state(&ln->sm, csio_lns_uninit); if (csio_is_phys_ln(ln)) { diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index 0ca695110f5493668b36f2e3375ae84163f6ee43..9010cb6045dc54fe39aa75e7c85c2ea0a6c57a89 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c @@ -808,7 +808,7 @@ csio_wr_destroy_queues(struct csio_hw *hw, bool cmd) csio_q_eqid(hw, i) = CSIO_MAX_QID; } - /* fall through */ + fallthrough; case CSIO_INGRESS: if (csio_q_iqid(hw, i) != CSIO_MAX_QID) { csio_wr_cleanup_iq_ftr(hw, i); diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 2b48954b6b1ef58549f0f7ff07b20546a7acca59..37d99357120faeae6d16b6fdbc18993e29b96af9 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -643,7 +643,7 @@ static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, int *need_rst) { switch (abort_reason) { - case CPL_ERR_BAD_SYN: /* fall through */ + case CPL_ERR_BAD_SYN: case CPL_ERR_CONN_RESET: return csk->state > CTP_ESTABLISHED ? 
-EPIPE : -ECONNRESET; case CPL_ERR_XMIT_TIMEDOUT: diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 4e82c14cb795e19a7259744b52f6ce9002013033..2c3491528d4245873505bcb09c481215569a5fdc 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1133,7 +1133,7 @@ static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, int *need_rst) { switch (abort_reason) { - case CPL_ERR_BAD_SYN: /* fall through */ + case CPL_ERR_BAD_SYN: case CPL_ERR_CONN_RESET: return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET; diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 71aebaf533ea543dd4ec701aab5166036ed020bc..0e8621a6956dcc6d3bf325e760ec4cc217f210fd 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -2457,10 +2457,10 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) return err; } - __kfree_skb(skb); log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", task->itt, skb, skb->len, skb->data_len, err); + __kfree_skb(skb); iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); return err; diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 94250ebe9e803cd66cabb511a066ad123e273ce8..e72440d919d2aab61657ac7e27873c576a0b18e7 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -748,16 +748,16 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level, /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ if (index == PRIMARY_HWQ) cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq); - /* fall through */ + fallthrough; case UNMAP_TWO: cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq); - /* fall through */ + fallthrough; case UNMAP_ONE: cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq); - /* fall through */ + fallthrough; case FREE_IRQ: cfg->ops->free_afu_irqs(hwq->ctx_cookie); - /* fall through */ + fallthrough; case UNDO_NOOP: /* No action required */ break; @@ -971,18 +971,18 @@ static void cxlflash_remove(struct pci_dev *pdev) switch (cfg->init_state) { case INIT_STATE_CDEV: cxlflash_release_chrdev(cfg); - /* fall through */ + fallthrough; case INIT_STATE_SCSI: cxlflash_term_local_luns(cfg); scsi_remove_host(cfg->host); - /* fall through */ + fallthrough; case INIT_STATE_AFU: term_afu(cfg); - /* fall through */ + fallthrough; case INIT_STATE_PCI: cfg->ops->destroy_afu(cfg->afu_cookie); pci_disable_device(pdev); - /* fall through */ + fallthrough; case INIT_STATE_NONE: free_mem(cfg); scsi_host_put(cfg->host); @@ -2355,11 +2355,11 @@ static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb) cxlflash_schedule_async_reset(cfg); break; } - /* fall through - to retry */ + fallthrough; /* to retry */ case -EAGAIN: if (++nretry < 2) goto retry; - /* fall through - to exit */ + fallthrough; /* to exit */ default: break; } @@ -2533,12 +2533,12 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) cfg->state = STATE_NORMAL; wake_up_all(&cfg->reset_waitq); ssleep(1); - /* fall through */ + fallthrough; case STATE_RESET: wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); if (cfg->state == STATE_NORMAL) break; - /* fall through */ + fallthrough; default: rc = FAILED; break; @@ -3019,7 +3019,7 @@ static ssize_t num_hwqs_store(struct device *dev, wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); if (cfg->state == STATE_NORMAL) goto retry; - /* else, fall 
through */ + fallthrough; default: /* Ideally should not happen */ dev_err(dev, "%s: Device is not ready, state=%d\n", @@ -3531,7 +3531,7 @@ static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd, if (likely(do_ioctl)) break; - /* fall through */ + fallthrough; default: rc = -EINVAL; goto out; diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index 593669ac3669443763f74a468fdc965a74845b6f..5dddf67dfa24a50ad9232c819099f65a9ac4e717 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -375,14 +375,13 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) switch (sshdr.sense_key) { case NO_SENSE: case RECOVERED_ERROR: - /* fall through */ case NOT_READY: result &= ~SAM_STAT_CHECK_CONDITION; break; case UNIT_ATTENTION: switch (sshdr.asc) { case 0x29: /* Power on Reset or Device Reset */ - /* fall through */ + fallthrough; case 0x2A: /* Device capacity changed */ case 0x3F: /* Report LUNs changed */ /* Retry the command once more */ @@ -1791,13 +1790,12 @@ static int process_sense(struct scsi_device *sdev, switch (sshdr.sense_key) { case NO_SENSE: case RECOVERED_ERROR: - /* fall through */ case NOT_READY: break; case UNIT_ATTENTION: switch (sshdr.asc) { case 0x29: /* Power on Reset or Device Reset */ - /* fall through */ + fallthrough; case 0x2A: /* Device settings/capacity changed */ rc = read_cap16(sdev, lli); if (rc) { @@ -2157,7 +2155,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) if (unlikely(rc)) goto cxlflash_ioctl_exit; - /* fall through */ + fallthrough; case DK_CXLFLASH_MANAGE_LUN: known_ioctl = true; @@ -2168,7 +2166,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) if (likely(do_ioctl)) break; - /* fall through */ + fallthrough; default: rc = -EINVAL; goto cxlflash_ioctl_exit; diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index 8acd4bb9fefbb409a8ae0d44e52311d152508b50..4a3f7831a2d648dddeba35e5c55cb6359c9d425e 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -60,7 +60,7 @@ static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h, ret = SCSI_DH_OK; break; } - /* Fallthrough */ + fallthrough; default: sdev_printk(KERN_WARNING, sdev, "%s: sending tur failed, sense %x/%x/%x\n", @@ -147,7 +147,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h) rc = SCSI_DH_RETRY; break; } - /* fall through */ + fallthrough; default: sdev_printk(KERN_WARNING, sdev, "%s: sending start_stop_unit failed, " diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c index b02ac389e6c6050fc92696d480131db15678ebb4..429d64299fe94645736ba1bda08904c4128f5598 100644 --- a/drivers/scsi/esas2r/esas2r_flash.c +++ b/drivers/scsi/esas2r/esas2r_flash.c @@ -1500,7 +1500,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, return complete_fmapi_req(a, rq, FI_STAT_SUCCESS); } - /* fall through */ + fallthrough; case FI_ACT_UP: /* Upload the components */ default: diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index eb7d139ffc00baf279b6e61f394265187dfa269b..09c5c24bf391fd4a2467ca6d89e0d181b130267b 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -1236,7 +1236,7 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a, a->init_msg = ESAS2R_INIT_MSG_GET_INIT; break; } - /* fall through */ 
+ fallthrough; case ESAS2R_INIT_MSG_GET_INIT: if (msg == ESAS2R_INIT_MSG_GET_INIT) { @@ -1250,7 +1250,7 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a, esas2r_hdebug("FAILED"); } } - /* fall through */ + fallthrough; default: rq->req_stat = RS_SUCCESS; diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index 89afa31e33cba889a9af63dd5253cf8a72f7ff0d..43a1fd11df5ed2ebb89c60e230868db923bcba1f 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -307,7 +307,7 @@ static void esp_reset_esp(struct esp *esp) case FASHME: esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); - /* fallthrough... */ + fallthrough; case FAS236: case PCSCSI: @@ -1741,7 +1741,7 @@ static int esp_process_event(struct esp *esp) case ESP_EVENT_DATA_IN: write = 1; - /* fallthru */ + fallthrough; case ESP_EVENT_DATA_OUT: { struct esp_cmd_entry *ent = esp->active_cmd; diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 1409c7687853c9da8f4e751b094f85c2860c8cec..5ea426effa60964bbdf0e79da95c8830ea6fff8e 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -450,10 +450,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) switch (fip->mode) { default: LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode); - /* fall-through */ + fallthrough; case FIP_MODE_AUTO: LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n"); - /* fall-through */ + fallthrough; case FIP_MODE_FABRIC: case FIP_MODE_NON_FIP: mutex_unlock(&fip->ctlr_mutex); @@ -773,7 +773,7 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, fc_fcoe_set_mac(mac, fh->fh_d_id); fip->update_mac(lport, mac); } - /* fall through */ + fallthrough; case ELS_LS_RJT: op = fr_encaps(fp); if (op) @@ -2439,7 +2439,7 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, frport->enode_mac, 0); break; } - /* fall through */ + fallthrough; case FIP_ST_VNMP_START: LIBFCOE_FIP_DBG(fip, "vn_probe_req: " "restart VN2VN negotiation\n"); diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 2cc676e3df6a74a4bd38b796dc067d07d3c03567..29e4cdcade7201166a7751e157dbb1561d29740b 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -340,7 +340,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt, break; case BOARD_DTC3181E: hostdata->io_width = 2; /* 16-bit PDMA */ - /* fall through */ + fallthrough; case BOARD_NCR53C400A: case BOARD_HP_C2502: hostdata->c400_ctl_status = 9; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 11caa4b0d79773e80f17e38f4653aeeb03b3ac81..d9d21d23372e54dc659ed974049e6178ff6c9292 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1144,7 +1144,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, hisi_hba->hw->get_events(hisi_hba, phy_no); break; } - /* fallthru */ + fallthrough; case PHY_FUNC_RELEASE_SPINUP_HOLD: default: return -EOPNOTSUPP; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 91794a50b31fe541eb39f788cbebd4ddbaaa6a3c..48d5da59262b43b73aa76c037531b5368df8bbdb 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -4697,7 +4697,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) case WRITE_6: case WRITE_12: is_write = 1; - /* fall through */ + fallthrough; case READ_6: case READ_12: if (*cdb_len == 6) { @@ -5147,7 +5147,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, switch (cmd->cmnd[0]) { case WRITE_6: is_write = 1; - /* 
fall through */ + fallthrough; case READ_6: first_block = (((cmd->cmnd[1] & 0x1F) << 16) | (cmd->cmnd[2] << 8) | @@ -5158,7 +5158,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, break; case WRITE_10: is_write = 1; - /* fall through */ + fallthrough; case READ_10: first_block = (((u64) cmd->cmnd[2]) << 24) | @@ -5171,7 +5171,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, break; case WRITE_12: is_write = 1; - /* fall through */ + fallthrough; case READ_12: first_block = (((u64) cmd->cmnd[2]) << 24) | @@ -5186,7 +5186,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, break; case WRITE_16: is_write = 1; - /* fall through */ + fallthrough; case READ_16: first_block = (((u64) cmd->cmnd[2]) << 56) | diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 77f4d37d5bd625b88c8e2ea9c5827851c0e57cba..ea7c8930592dc49905f84bd5dcc414d8c93a2322 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1866,7 +1866,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job) port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) | (bsg_request->rqst_data.h_els.port_id[1] << 8) | bsg_request->rqst_data.h_els.port_id[2]; - /* fall through */ + fallthrough; case FC_BSG_RPT_ELS: fc_flags = IBMVFC_FC_ELS; break; @@ -1875,7 +1875,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job) port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) | (bsg_request->rqst_data.h_ct.port_id[1] << 8) | bsg_request->rqst_data.h_ct.port_id[2]; - /* fall through */ + fallthrough; case FC_BSG_RPT_CT: fc_flags = IBMVFC_FC_CT_IU; break; @@ -4122,7 +4122,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) return; case IBMVFC_MAD_CRQ_ERROR: ibmvfc_retry_host_init(vhost); - /* fall through */ + fallthrough; case IBMVFC_MAD_DRIVER_FAILED: ibmvfc_free_event(evt); return; diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index d9e94e81da01787f28b4bbd6cd0d840f1aa833be..cc3908c2d2f943173e0bc6cffe7770032d1eb2a8 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -1581,7 +1581,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, case H_PERMISSION: if (connection_broken(vscsi)) flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); - /* Fall through */ + fallthrough; default: dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n", rc); @@ -2489,10 +2489,10 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi) break; case H_CLOSED: vscsi->flags |= CLIENT_FAILED; - /* Fall through */ + fallthrough; case H_DROPPED: vscsi->flags |= RESPONSE_Q_DOWN; - /* Fall through */ + fallthrough; case H_REMOTE_PARM: dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", rc); diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 1459b1467027994f1121de7c95c2d4f8b2782765..862d35a098cf3c9f3cd9ca11a3a9bb222f221df4 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c @@ -801,7 +801,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) case 1: /* Phase 1 - Connected */ imm_connect(dev, CONNECT_EPP_MAYBE); cmd->SCp.phase++; - /* fall through */ + fallthrough; case 2: /* Phase 2 - We are now talking to the scsi bus */ if (!imm_select(dev, scmd_id(cmd))) { @@ -809,7 +809,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) return 0; } cmd->SCp.phase++; - /* fall through */ + fallthrough; case 3: /* Phase 3 - Ready to accept a command */ w_ctr(ppb, 0x0c); @@ -819,7 +819,7 @@ 
static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) if (!imm_send_command(cmd)) return 0; cmd->SCp.phase++; - /* fall through */ + fallthrough; case 4: /* Phase 4 - Setup scatter/gather buffers */ if (scsi_bufflen(cmd)) { @@ -835,7 +835,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) cmd->SCp.phase++; if (cmd->SCp.this_residual & 0x01) cmd->SCp.this_residual++; - /* fall through */ + fallthrough; case 5: /* Phase 5 - Pre-Data transfer stage */ /* Spin lock for BUSY */ @@ -852,7 +852,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) if (imm_negotiate(dev)) return 0; cmd->SCp.phase++; - /* fall through */ + fallthrough; case 6: /* Phase 6 - Data transfer stage */ /* Spin lock for BUSY */ @@ -868,7 +868,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) return 1; } cmd->SCp.phase++; - /* fall through */ + fallthrough; case 7: /* Phase 7 - Post data transfer stage */ if ((dev->dp) && (dev->rd)) { @@ -880,7 +880,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) } } cmd->SCp.phase++; - /* fall through */ + fallthrough; case 8: /* Phase 8 - Read status/message */ /* Check for data overrun */ diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index 7f9b3f20e5e4228c31afb9d9e73d1783209dc12b..4cacb800b53095712540bb9ffa5df22bdb3c588a 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -778,7 +778,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) break; case SCU_EVENT_LINK_FAILURE: scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); - /* fall through */ + fallthrough; case SCU_EVENT_HARD_RESET_RECEIVED: /* Start the oob/sn state machine over again */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index cd1e4b4d95bbbba007f93630a81574ff20d3bc73..c3f540b556895675105679ec09c69190c10c91e5 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -310,7 +310,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost, /* Kill all outstanding requests for the device. */ sci_remote_device_terminate_requests(idev); - /* Fall through - into the default case... */ + fallthrough; /* into the default case */ default: clear_bit(IDEV_IO_READY, &idev->flags); break; @@ -593,7 +593,7 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, break; } - /* fall through - and treat as unhandled... */ + fallthrough; /* and treat as unhandled */ default: dev_dbg(scirdev_to_dev(idev), "%s: device: %p event code: %x: %s\n", diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c index 474a434609636bb9cb65064aec27abda05f36719..68333f523b351a8305a829a546b17826d68a0f60 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c @@ -225,7 +225,7 @@ static void sci_remote_node_context_continue_state_transitions(struct sci_remote case RNC_DEST_READY: case RNC_DEST_SUSPENDED_RESUME: rnc->destination_state = RNC_DEST_READY; - /* Fall through... 
*/ + fallthrough; case RNC_DEST_FINAL: sci_remote_node_context_resume(rnc, rnc->user_callback, rnc->user_cookie); @@ -601,9 +601,9 @@ enum sci_status sci_remote_node_context_suspend( __func__, sci_rnc); return SCI_FAILURE_INVALID_STATE; } - /* Fall through - and handle like SCI_RNC_POSTING */ + fallthrough; /* and handle like SCI_RNC_POSTING */ case SCI_RNC_RESUMING: - /* Fall through - and handle like SCI_RNC_POSTING */ + fallthrough; /* and handle like SCI_RNC_POSTING */ case SCI_RNC_POSTING: /* Set the destination state to AWAIT - this signals the * entry into the SCI_RNC_READY state that a suspension diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 6561a07db18992686757adf8cfdfd6629f7d00fa..6e0817941fa74c5589dab4ef749d953526e31d6a 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -894,7 +894,7 @@ sci_io_request_terminate(struct isci_request *ireq) * and don't wait for the task response. */ sci_change_state(&ireq->sm, SCI_REQ_ABORTING); - /* Fall through - and handle like ABORTING... */ + fallthrough; /* and handle like ABORTING */ case SCI_REQ_ABORTING: if (!isci_remote_device_is_safe_to_abort(ireq->target_device)) set_bit(IREQ_PENDING_ABORT, &ireq->flags); diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index d8cbc9c0e766b1adabc3675fe0dfbe462d73dda4..e67abb184a8a0fb23a2fae0b7f58d43664e9841a 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -634,8 +634,6 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, fc_frame_free(fp); out: kref_put(&rdata->kref, fc_rport_destroy); - if (!IS_ERR(fp)) - fc_frame_free(fp); } /** diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 16eb3b60ed5825ae9253cd97436c3f345553b752..96a2952cf626bf1b3bb08c01cac23a9d5c359310 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -2108,7 +2108,7 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) switch (op) { case ELS_LS_RJT: FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n"); - /* fall through */ + fallthrough; case ELS_LS_ACC: goto cleanup; default: @@ -2622,7 +2622,7 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp) case FC_EOF_T: if (f_ctl & FC_FC_END_SEQ) skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl)); - /* fall through */ + fallthrough; case FC_EOF_N: if (fh->fh_type == FC_TYPE_BLS) fc_exch_recv_bls(ema->mp, fp); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index e11d4f002bd493dc7f78d0782f330e4f6113c0ec..7cfeb6886237c0f44071fccc1ec7480b80679ec3 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -752,7 +752,7 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) brp = fc_frame_payload_get(fp, sizeof(*brp)); if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR) break; - /* fall thru */ + fallthrough; default: /* * we will let the command timeout @@ -1536,7 +1536,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) "device %x invalid REC reject %d/%d\n", fsp->rport->port_id, rjt->er_reason, rjt->er_explan); - /* fall through */ + fallthrough; case ELS_RJT_UNSUP: FC_FCP_DBG(fsp, "device does not support REC\n"); rpriv = fsp->rport->dd_data; @@ -1668,7 +1668,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n", fsp, fsp->rport->port_id, error); fsp->status_code = FC_CMD_PLOGO; - /* fall through */ 
+ fallthrough; case -FC_EX_TIMEOUT: /* @@ -1830,7 +1830,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) break; case -FC_EX_CLOSED: /* e.g., link failure */ FC_FCP_DBG(fsp, "SRR error, exchange closed\n"); - /* fall through */ + fallthrough; default: fc_fcp_retry_cmd(fsp, FC_ERROR); break; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index b84dbc316df1513473b91b5fc682bbebc0ae2567..6557fda85c5c716c77609c33afc7cc58b5e35267 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1578,7 +1578,7 @@ static void fc_lport_timeout(struct work_struct *work) case LPORT_ST_DPRT: FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n", fc_lport_state(lport)); - /* fall thru */ + fallthrough; case LPORT_ST_SCR: fc_lport_enter_scr(lport); break; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 18663a82865f99d746a2c61fb4d2a19e2e5db8e3..a60b228d13f16b955e6a2d0f10f79cc33cf4ab66 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1723,7 +1723,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) kref_put(&rdata->kref, fc_rport_destroy); goto busy; } - /* fall through */ + fallthrough; default: FC_RPORT_DBG(rdata, "Reject ELS 0x%02x while in state %s\n", diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 49c8a1818baf89e58c7543f6157cb307cfcf5bd3..1e9c3171fa9f44bed8c982f8abaecc8284a764ff 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -248,7 +248,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) hdr_lun = scsilun_to_int(&tmf->lun); if (hdr_lun != task->sc->device->lun) return 0; - /* fall through */ + fallthrough; case ISCSI_TM_FUNC_TARGET_WARM_RESET: /* * Fail all SCSI cmd PDUs @@ -1674,7 +1674,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) sc->result = DID_NO_CONNECT << 16; break; } - /* fall through */ + fallthrough; case ISCSI_STATE_IN_RECOVERY: reason = FAILURE_SESSION_IN_RECOVERY; sc->result = DID_IMM_RETRY << 16; @@ -2239,7 +2239,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) "progress\n"); goto success; } - /* fall through */ + fallthrough; default: conn->tmf_state = TMF_INITIAL; goto failed; diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 6ef93c7af954816b1c87189fb5d7fc5199c83c5a..37e5d4e48c2f206af3ca71f4006bd136aae1b3b0 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -772,7 +772,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) iscsi_tcp_data_recv_prep(tcp_conn); return 0; } - /* fall through */ + fallthrough; case ISCSI_OP_LOGOUT_RSP: case ISCSI_OP_NOOP_IN: case ISCSI_OP_SCSI_TMFUNC_RSP: diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 1b93332daa6b38a206ced8e0393f3292ec0e7449..a4887985aad6e4ef341fdebf2a00a9d4a6bfbc0b 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -209,7 +209,10 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) task->num_scatter = si; } - task->data_dir = qc->dma_dir; + if (qc->tf.protocol == ATA_PROT_NODATA) + task->data_dir = DMA_NONE; + else + task->data_dir = qc->dma_dir; task->scatter = qc->sg; task->ata_task.retry_count = 1; task->task_state_flags = SAS_TASK_STATE_PENDING; @@ -324,7 +327,7 @@ static int smp_ata_check_ready(struct ata_link *link) case SAS_END_DEVICE: if (ex_phy->attached_sata_dev) return sas_ata_clear_pending(dev, ex_phy); - /* 
fall through */ + fallthrough; default: return -ENODEV; } diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index daf951b0b3f5538fa05f7c7b5e2e99626e18d5cd..cd7c7d269f6f4c9a1961980bde899a243d801658 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -108,7 +108,7 @@ static int sas_get_port_device(struct asd_sas_port *port) rphy = NULL; break; } - /* fall through */ + fallthrough; case SAS_END_DEVICE: rphy = sas_end_device_alloc(port->port); break; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index b7d1b1ea185d773998aee67024abc054ec97da58..8d6bcc19359ffda33c6e4dcf181532553408ad2c 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -1096,7 +1096,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } else memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); - /* fallthrough */ + fallthrough; case SAS_EDGE_EXPANDER_DEVICE: child = sas_ex_discover_expander(dev, phy_id); break; diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 9e0975e55c27e247c04eb118d799c1f0d752cfae..1bf939818c98182d0d41b78ad3357e0d5dbf440b 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -622,7 +622,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * sas_scsi_clear_queue_lu(work_q, cmd); goto Again; } - /* fallthrough */ + fallthrough; case TASK_IS_NOT_AT_LU: case TASK_ABORT_FAILED: pr_notice("task 0x%p is not at LU: I_T recover\n", diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index ef2015fad2d5929da4cf5d0cab05bcf8d00d9356..d0141a23a833765a09137794781748ef51ac543e 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -3202,7 +3202,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, case SLI_MGMT_GHAT: case SLI_MGMT_GRPL: rsp_size = FC_MAX_NS_RSP; - /* fall through */ + fallthrough; case SLI_MGMT_DHBA: case SLI_MGMT_DHAT: pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID; @@ -3215,7 +3215,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, case SLI_MGMT_GPAT: case SLI_MGMT_GPAS: rsp_size = FC_MAX_NS_RSP; - /* fall through */ + fallthrough; case SLI_MGMT_DPRT: case SLI_MGMT_DPA: pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 48dc63f22ccad5427949c9c9ae851dcaab03406f..b60945182db8056f253758f5d9c95ae002cb3a0a 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3517,6 +3517,9 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); + prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); + prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); + prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue RDF: did:x%x", @@ -4656,7 +4659,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, out: if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) { spin_lock_irq(shost->host_lock); - ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); + if (mbox) + ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; + ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 
spin_unlock_irq(shost->host_lock);

	/* If the node is not being used by another discovery thread,
@@ -9134,7 +9139,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
			lpfc_nlp_put(ndlp);
			return;
		}
-		/* fall through */
+		fallthrough;
	default:
		/* Try to recover from this error */
		if (phba->sli_rev == LPFC_SLI_REV4)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 142a021144793c555242f5f37d58f508f9661ddd..d32c7e7ab09d6603ade22b92c88cb36d06afd7ec 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4728,15 +4728,14 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
-			/* fall through */
+			fallthrough;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
-			/* fall through */
+			fallthrough;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
-			/* fall through */
		}
	} else if (pring->ringno == LPFC_FCP_RING) {
		/* Skip match check if waiting to relogin to FCP target */
@@ -6055,7 +6054,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
-		/* fall through */
+		fallthrough;
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index c4ba8273a63fbea46f49b45a92dd85c3a55e444c..12e4e76233e6aede588fd19227c311904d9d33ac 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4800,7 +4800,7 @@ struct send_frame_wqe {
	uint32_t fc_hdr_wd5;           /* word 15 */
};

-#define ELS_RDF_REG_TAG_CNT 1
+#define ELS_RDF_REG_TAG_CNT 4
struct lpfc_els_rdf_reg_desc {
	struct fc_df_desc_fpin_reg	reg_desc;	/* descriptor header */
	__be32				desc_tags[ELS_RDF_REG_TAG_CNT];
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c697259993159d1c6895f2d1a7c8653e5ae48256..ca25e54bb78249e6e9535eb2c52251ff2498347c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11376,7 +11376,6 @@ lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
{
	cpumask_clear(&eqhdl->aff_mask);
	irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
-	irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
}

/**
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index cad53d19cb25f15aeef94add85620e5a153a62b9..92d6e7b98770d764624a586045ae5a94d72d56b9 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -464,7 +464,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	case NLP_STE_NPR_NODE:
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
			break;
-		/* fall through */
+		fallthrough;
	case NLP_STE_REG_LOGIN_ISSUE:
	case NLP_STE_PRLI_ISSUE:
	case NLP_STE_UNMAPPED_NODE:
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index e5be334d6a119e15ed0f794e138e9655c309e227..0c39ed50998c866ab2521f1913521b3bb1204287 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1225,7 +1225,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
				 lpfc_ncmd, nCmd,
				 lpfc_ncmd->cur_iocbq.sli4_xritag,
				 bf_get(lpfc_wcqe_c_xb, wcqe));
-		/* fall through */
+		fallthrough;
	default:
out_err:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
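
The lpfc_hw4.h hunk above has to land in the same patch as the lpfc_issue_els_rdf() hunk earlier in this series: lpfc now registers four FPIN descriptor tags instead of one, so ELS_RDF_REG_TAG_CNT must grow to 4 or the new desc_tags[1..3] stores would write past the end of the array. Condensed from the two hunks (names exactly as they appear there):

	#define ELS_RDF_REG_TAG_CNT 4	/* must match the stores below */

	struct lpfc_els_rdf_reg_desc {
		struct fc_df_desc_fpin_reg reg_desc;	/* descriptor header */
		__be32 desc_tags[ELS_RDF_REG_TAG_CNT];
	};

	/* one tag per element, big-endian on the wire */
	prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
	prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
	prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
	prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
	prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);

diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 5e802c8b22a99229ed1ae072b3c79011723caa1f..983eeb0e3d07e36604ec129c6bf7dbc6ddbfe041 100644
---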
a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1093,7 +1093,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, break; } - /* fall through */ + fallthrough; case SCSI_PROT_WRITE_INSERT: /* * For WRITE_INSERT, force the error @@ -1213,7 +1213,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, rc = BG_ERR_TGT | BG_ERR_CHECK; break; } - /* fall through */ + fallthrough; case SCSI_PROT_WRITE_INSERT: /* * For WRITE_INSERT, force the @@ -1295,7 +1295,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, switch (op) { case SCSI_PROT_WRITE_PASS: rc = BG_ERR_CHECK; - /* fall through */ + fallthrough; case SCSI_PROT_WRITE_INSERT: /* @@ -3980,7 +3980,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_cmd->cur_iocbq.sli4_lxritag, 0, 0); } - /* fall through */ + fallthrough; default: cmd->result = DID_ERROR << 16; break; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 4cd7ded656b7d25c771d36de6082af71888f1c18..e158cd77d387f4c7c4eaf7c001e3af68b3588019 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -9339,7 +9339,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, */ if (piocb->iocb_cmpl) piocb->iocb_cmpl = NULL; - /*FALLTHROUGH*/ + fallthrough; case CMD_CREATE_XRI_CR: case CMD_CLOSE_XRI_CN: case CMD_CLOSE_XRI_CX: @@ -9653,7 +9653,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, cmnd = CMD_XMIT_SEQUENCE64_CR; if (phba->link_flag & LS_LOOPBACK_MODE) bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); - /* fall through */ + fallthrough; case CMD_XMIT_SEQUENCE64_CR: /* word3 iocb=io_tag32 wqe=reserved */ wqe->xmit_sequence.rsvd3 = 0; @@ -13630,7 +13630,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2537 Receive Frame Truncated!!\n"); - /* fall through */ + fallthrough; case FC_STATUS_RQ_SUCCESS: spin_lock_irqsave(&phba->hbalock, iflags); lpfc_sli4_rq_release(hrq, drq); @@ -13678,7 +13678,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) atomic_read(&tgtp->rcv_fcp_cmd_out), atomic_read(&tgtp->xmt_fcp_release)); } - /* fallthrough */ + fallthrough; case FC_STATUS_INSUFF_BUF_NEED_BUF: hrq->RQ_no_posted_buf++; @@ -14162,7 +14162,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, case FC_STATUS_RQ_BUF_LEN_EXCEEDED: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6126 Receive Frame Truncated!!\n"); - /* fall through */ + fallthrough; case FC_STATUS_RQ_SUCCESS: spin_lock_irqsave(&phba->hbalock, iflags); lpfc_sli4_rq_release(hrq, drq); @@ -14209,7 +14209,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, atomic_read(&tgtp->rcv_fcp_cmd_out), atomic_read(&tgtp->xmt_fcp_release)); } - /* fallthrough */ + fallthrough; case FC_STATUS_INSUFF_BUF_NEED_BUF: hrq->RQ_no_posted_buf++; @@ -15096,7 +15096,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + fallthrough; /* otherwise default to smallest count */ case 256: bf_set(lpfc_eq_context_count, &eq_create->u.request.context, LPFC_EQ_CNT_256); @@ -15238,7 +15238,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, LPFC_CQ_CNT_WORD7); break; } - /* fall through */ + fallthrough; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 
"0361 Unsupported CQ count: " @@ -15249,7 +15249,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + fallthrough; /* otherwise default to smallest count */ case 256: bf_set(lpfc_cq_context_count, &cq_create->u.request.context, LPFC_CQ_CNT_256); @@ -15417,7 +15417,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, LPFC_CQ_CNT_WORD7); break; } - /* fall through */ + fallthrough; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3118 Bad CQ count. (%d)\n", @@ -15426,7 +15426,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest */ + fallthrough; /* otherwise default to smallest */ case 256: bf_set(lpfc_mbx_cq_create_set_cqe_cnt, &cq_set->u.request, LPFC_CQ_CNT_256); @@ -15702,7 +15702,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + fallthrough; /* otherwise default to smallest count */ case 16: bf_set(lpfc_mq_context_ring_size, &mq_create_ext->u.request.context, @@ -16123,7 +16123,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + fallthrough; /* otherwise default to smallest count */ case 512: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, @@ -16260,7 +16260,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, status = -EINVAL; goto out; } - /* fall through - otherwise default to smallest count */ + fallthrough; /* otherwise default to smallest count */ case 512: bf_set(lpfc_rq_context_rqe_count, &rq_create->u.request.context, diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 20adec4387f026cb2a242a35b577ee0cfb40761b..c657abf22b751318179fa5cf8ddff8b6d9e0aa1d 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. 
* *******************************************************************/

-#define LPFC_DRIVER_VERSION "12.8.0.3"
+#define LPFC_DRIVER_VERSION "12.8.0.4"
#define LPFC_DRIVER_NAME		"lpfc"

/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 0484ee52ae8023e2be9bc06f6f47fb55999fd612..ac406049e7c8afb37f32dca360ad67ca6994ae84 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -491,9 +491,9 @@ mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
	if (adapter->support_random_del && adapter->read_ldidmap )
		switch (cmd->cmnd[0]) {
-		case READ_6:	/* fall through */
-		case WRITE_6:	/* fall through */
-		case READ_10:	/* fall through */
+		case READ_6:
+		case WRITE_6:
+		case READ_10:
		case WRITE_10:
			ldrv_num += 0x80;
		}
@@ -852,7 +852,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
		return scb;

#if MEGA_HAVE_CLUSTERING
-	case RESERVE:	/* Fall through */
+	case RESERVE:
	case RELEASE:

		/*
@@ -987,7 +987,7 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
			adapter->flag |= (1L << cmd->device->channel);
		}
-		/* Fall through */
+		fallthrough;
	default:
		pthru->numsgelements = mega_build_sglist(adapter, scb,
				&pthru->dataxferaddr, &pthru->dataxferlen);
@@ -1050,7 +1050,7 @@ mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
			adapter->flag |= (1L << cmd->device->channel);
		}
-		/* Fall through */
+		fallthrough;
	default:
		epthru->numsgelements = mega_build_sglist(adapter, scb,
				&epthru->dataxferaddr, &epthru->dataxferlen);
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 19469a2c0ea36f617f5cb33636c4d7dea4b54f71..4a27ac869f2e20dc86865079a5c687020c4de3fa 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1581,7 +1581,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
			return NULL;
		}
-		/* Fall through */
+		fallthrough;
	case READ_CAPACITY:
		/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 861f7140f52e852b7ee375f1a3dce4af068f6b9c..2b7e7b5f38edf997cfc2c03fdd4e2aeae2c17e2c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3522,7 +3522,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
			megasas_complete_int_cmd(instance, cmd);
			break;
		}
-		/* fall through */
+		fallthrough;
	case MFI_CMD_LD_READ:
	case MFI_CMD_LD_WRITE:
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 0824410f78f8a241a549eaf14ee759b5e15ed151..b0c01cf0428f2f8b507421c7251b26a1cfa4540d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3534,7 +3534,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
			atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
			cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
		}
-		/* Fall through - and complete IO */
+		fallthrough;	/* and complete IO */
	case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
		atomic_dec(&instance->fw_outstanding);
		if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
@@ -3689,7 +3689,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
	instance = irq_ctx->instance;

	if (irq_ctx->irq_line_enable) {
-		disable_irq(irq_ctx->os_irq);
+		disable_irq_nosync(irq_ctx->os_irq);
		irq_ctx->irq_line_enable = false;
	}
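
The megasas_irqpoll() hunk just above (and the matching mpt3sas _base_irqpoll() change further down) is a correctness fix, not a cleanup: disable_irq() can sleep while it waits for the last handler on that line to complete, which is not allowed in the softirq context where irq_poll callbacks run, and waiting on the very line being serviced risks deadlock; disable_irq_nosync() only masks the line and returns. A reduced, self-contained sketch of the pattern (struct demo_ctx and demo_irqpoll are hypothetical stand-ins, not driver code):

	#include <linux/interrupt.h>
	#include <linux/irq_poll.h>

	struct demo_ctx {
		struct irq_poll irqpoll;
		bool irq_line_enable;
		int os_irq;
	};

	static int demo_irqpoll(struct irq_poll *iop, int budget)
	{
		struct demo_ctx *ctx = container_of(iop, struct demo_ctx, irqpoll);

		if (ctx->irq_line_enable) {
			/*
			 * irq_poll runs in softirq context while this IRQ line
			 * is still being serviced.  disable_irq() would wait
			 * for the running handler and may sleep, so mask the
			 * line without waiting instead.
			 */
			disable_irq_nosync(ctx->os_irq);
			ctx->irq_line_enable = false;
		}
		return 0;
	}

diff --git a/drivers/scsi/mesh.c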
b/drivers/scsi/mesh.c index fd1d03064079729580c2c911a6cfa55d2137b975..0a9f4e44ab2cb92cec67457a24ec8e3a0c36588c 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -1457,7 +1457,7 @@ static void cmd_complete(struct mesh_state *ms) /* huh? we expected a phase mismatch */ ms->n_msgin = 0; ms->msgphase = msg_in; - /* fall through */ + fallthrough; case msg_in: /* should have some message bytes in fifo */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 1d64524cd86315c4e31c5427f9e2649904b5e557..8062bd99add85cdaf6a29391c91f5f1110387a12 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -1733,7 +1733,7 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget) reply_q = container_of(irqpoll, struct adapter_reply_queue, irqpoll); if (reply_q->irq_line_enable) { - disable_irq(reply_q->os_irq); + disable_irq_nosync(reply_q->os_irq); reply_q->irq_line_enable = false; } num_entries = _base_process_reply_queue(reply_q); @@ -4681,7 +4681,7 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc) ioc_info(ioc, "performance mode: balanced\n"); return; } - /* Fall through */ + fallthrough; case MPT_PERF_MODE_LATENCY: /* * Enable interrupt coalescing on all reply queues diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 43260306668cfb8d7f9fbb45b672f4c24bc6f61e..7c119b9048349bbd37aefa92b8200c2764358c58 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -1002,7 +1002,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, } /* drop to default case for posting the request */ } - /* fall through */ + fallthrough; default: ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 08fc4b381056c726a6203d44d74323f7241bc517..2e2756d8a49b111002e9eb11066db16c9a9fbc5f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -5470,7 +5470,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: scsi_set_resid(scmd, 0); - /* fall through */ + fallthrough; case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: case MPI2_IOCSTATUS_SUCCESS: scmd->result = (DID_OK << 16) | scsi_status; @@ -6480,7 +6480,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, if (!test_bit(handle, ioc->pend_os_device_add)) break; - /* fall through */ + fallthrough; case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: @@ -7208,7 +7208,7 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc, event_data->PortEntry[i].PortStatus &= 0xF0; event_data->PortEntry[i].PortStatus |= MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED; - /* fall through */ + fallthrough; case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: if (ioc->shost_recovery) break; @@ -10653,7 +10653,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: dev_info(&pdev->dev, "HBA is in Configurable Secure mode\n"); - /* fall through */ + fallthrough; case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: ioc->is_aero_ioc = ioc->is_gen35_ioc = 1; diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index d4bd31a75b9dbf8accf12d05c1c6349f9c823163..b2869c5dd7fb53a7c2c4a8701cae280053872270 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -650,7 +650,7 @@ static void 
myrb_bgi_control(struct myrb_hba *cb) if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS) sdev_printk(KERN_INFO, sdev, "Background Initialization Aborted\n"); - /* Fallthrough */ + fallthrough; case MYRB_STATUS_NO_BGI_INPROGRESS: cb->bgi_status.status = MYRB_BGI_INVALID; break; @@ -1528,7 +1528,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost, scmd->scsi_done(scmd); return 0; } - /* fall through */ + fallthrough; case WRITE_6: lba = (((scmd->cmnd[1] & 0x1F) << 16) | (scmd->cmnd[2] << 8) | @@ -1545,7 +1545,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost, scmd->scsi_done(scmd); return 0; } - /* fall through */ + fallthrough; case WRITE_10: case VERIFY: /* 0x2F */ case WRITE_VERIFY: /* 0x2E */ @@ -1562,7 +1562,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost, scmd->scsi_done(scmd); return 0; } - /* fall through */ + fallthrough; case WRITE_12: case VERIFY_12: /* 0xAF */ case WRITE_VERIFY_12: /* 0xAE */ diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index f88adab3f9135ca30f68379bc0e7025bb3f62663..03d70138ad58daffda6e3d211b0c41078d5b3c39 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -3640,7 +3640,7 @@ ncr_script_copy_and_bind (struct ncb *np, ncrcmd *src, ncrcmd *dst, int len) new = old; break; } - /* fall through */ + fallthrough; default: panic("ncr_script_copy_and_bind: weird relocation %x\n", old); break; @@ -3910,14 +3910,14 @@ static void __init ncr_prepare_setting(struct ncb *np) np->scsi_mode = SMODE_HVD; break; } - /* fall through */ + fallthrough; case 3: /* SYMBIOS controllers report HVD through GPIO3 */ if (INB(nc_gpreg) & 0x08) break; - /* fall through */ + fallthrough; case 2: /* Set HVD unconditionally */ np->scsi_mode = SMODE_HVD; - /* fall through */ + fallthrough; case 1: /* Trust previous settings for HVD */ if (np->sv_stest2 & 0x20) np->scsi_mode = SMODE_HVD; @@ -4296,7 +4296,7 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd) break; cp->phys.header.wgoalp = cpu_to_scr(goalp); cp->phys.header.wlastp = cpu_to_scr(lastp); - /* fall through */ + fallthrough; case DMA_FROM_DEVICE: goalp = NCB_SCRIPT_PHYS (np, data_in2) + 8; if (segments <= MAX_SCATTERL) @@ -6717,7 +6717,7 @@ void ncr_int_sir (struct ncb *np) OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0])); return; } - /* fall through */ + fallthrough; case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */ case SIR_RESEL_BAD_LUN: /* Will send a TARGET RESET message */ case SIR_RESEL_BAD_I_T_L_Q: /* Will send an ABORT TAG message */ @@ -6825,7 +6825,7 @@ void ncr_int_sir (struct ncb *np) */ OUTB (HS_PRT, HS_BUSY); - /* fall through */ + fallthrough; case SIR_NEGO_PROTO: /*------------------------------------------------------- diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 8655ff1249bbc981e50f41f6294ec2bc6dc7baa8..bc5a623519e7ba5accaa726a69ae149849ceb8b1 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -1113,7 +1113,7 @@ static irqreturn_t nspintr(int irq, void *dev_id) nsp_scsi_done(tmpSC); return IRQ_HANDLED; } - /* fall thru */ + fallthrough; default: if ((irq_status & (IRQSTATUS_SCSI | IRQSTATUS_FIFO)) == 0) { return IRQ_HANDLED; diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 337e79d6837ffa86d6dde1e6a0d077692800ce37..9889bab7d31c1ca60cc2b14347d31a2c3b0c1c85 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -818,7 +818,7 @@ pm8001_exec_internal_task_abort(struct 
pm8001_hba_info *pm8001_ha,

	res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
	if (res)
-		return res;
+		goto ex_err;
	ccb = &pm8001_ha->ccb_info[ccb_tag];
	ccb->device = pm8001_dev;
	ccb->ccb_tag = ccb_tag;
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 0ae800c5b7396aa5944eb36bf8bbb4ae5f1f632c..aa41f7ac91cb7bf8bde076f519acb84d2f185896 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -717,7 +717,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
			}
			cmd->SCp.phase++;
		}
-		/* fall through */
+		fallthrough;
	case 2:		/* Phase 2 - We are now talking to the scsi bus */
		if (!ppa_select(dev, scmd_id(cmd))) {
@@ -725,7 +725,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
			return 0;
		}
		cmd->SCp.phase++;
-		/* fall through */
+		fallthrough;
	case 3:		/* Phase 3 - Ready to accept a command */
		w_ctr(ppb, 0x0c);
@@ -735,7 +735,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
		if (!ppa_send_command(cmd))
			return 0;
		cmd->SCp.phase++;
-		/* fall through */
+		fallthrough;
	case 4:		/* Phase 4 - Setup scatter/gather buffers */
		if (scsi_bufflen(cmd)) {
@@ -749,7 +749,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
		}
		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
		cmd->SCp.phase++;
-		/* fall through */
+		fallthrough;
	case 5:		/* Phase 5 - Data transfer stage */
		w_ctr(ppb, 0x0c);
@@ -762,7 +762,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
		if (retv == 0)
			return 1;
		cmd->SCp.phase++;
-		/* fall through */
+		fallthrough;
	case 6:		/* Phase 6 - Read status/message */
		cmd->result = DID_OK << 16;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 3f04f2c8136638117803e9db000ccbcfbe8006e6..5ca424df355c15912e0a9380dc2559d4f0ea988d 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3863,7 +3863,7 @@ void qedf_stag_change_work(struct work_struct *work)
	    container_of(work, struct qedf_ctx, stag_work.work);

	if (!qedf) {
-		QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
+		QEDF_ERR(NULL, "qedf is NULL");
		return;
	}
	QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 91eb6901815c967696ff5a80bf89f69d15c945dd..e1d7de63e8f87c757be9092c3e22853ca7b83f69 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -380,5 +380,8 @@ extern int qla24xx_soft_reset(struct qla_hw_data *);
static inline int
ql_mask_match(uint level)
{
+	if (ql2xextended_error_logging == 1)
+		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+
	return (level & ql2xextended_error_logging) == level;
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 8c92af5e43909334b1e40341c8b27f2e533eb0d1..a165120d2976f711c0915c7504ec268de4aa3d6b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1626,7 +1626,7 @@ typedef struct {
	 */
	uint8_t	 firmware_options[2];

-	uint16_t frame_payload_size;
+	__le16	frame_payload_size;
	__le16	max_iocb_allocation;
	__le16	execution_throttle;
	uint8_t	 retry_count;
@@ -3880,6 +3880,7 @@ struct qla_hw_data {
		uint32_t	scm_supported_f:1;
				/* Enabled in Driver */
		uint32_t	scm_enabled:1;
+		uint32_t	max_req_queue_warned:1;
	} flags;

	uint16_t max_exchg;
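
The qla_def.h hunk retypes frame_payload_size from uint16_t to __le16, and the matching qla2x00_nvram_config() hunk later in this patch wraps the assigned constants in cpu_to_le16(). The NVRAM image this struct overlays is fixed little-endian, so the annotated type keeps big-endian hosts correct and lets sparse flag any unconverted access. A minimal illustration of the convention (hypothetical demo_* names; the byte-order helpers are the standard kernel ones):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* hypothetical on-wire record, always stored little-endian */
	struct demo_nvram {
		__le16 frame_payload_size;
	};

	static void demo_set_default(struct demo_nvram *nv)
	{
		/* store: convert CPU byte order to the fixed on-wire order */
		nv->frame_payload_size = cpu_to_le16(2048);
	}

	static u16 demo_get(const struct demo_nvram *nv)
	{
		/* load: convert back before doing arithmetic in CPU order */
		return le16_to_cpu(nv->frame_payload_size);
	}

diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index df670fba2ab8af641f101cfa366aa4173c8041e5..b569fd6e96d63637f15633c279b3997b367a117c 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -177,7 +177,7 @@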
qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, break; case CS_TIMEOUT: rval = QLA_FUNCTION_TIMEOUT; - /* fall through */ + fallthrough; default: ql_dbg(ql_dbg_disc, vha, 0x2033, "%s failed, completion status (%x) on port_id: " @@ -1505,11 +1505,11 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd, static uint qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha) { + uint speeds = 0; + if (IS_CNA_CAPABLE(ha)) return FDMI_PORT_SPEED_10GB; if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { - uint speeds = 0; - if (ha->max_supported_speed == 2) { if (ha->min_supported_speed <= 6) speeds |= FDMI_PORT_SPEED_64GB; @@ -1536,9 +1536,16 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha) } return speeds; } - if (IS_QLA2031(ha)) - return FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB| - FDMI_PORT_SPEED_4GB; + if (IS_QLA2031(ha)) { + if ((ha->pdev->subsystem_vendor == 0x103C) && + (ha->pdev->subsystem_device == 0x8002)) { + speeds = FDMI_PORT_SPEED_16GB; + } else { + speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB| + FDMI_PORT_SPEED_4GB; + } + return speeds; + } if (IS_QLA25XX(ha)) return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB| FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB; @@ -3436,7 +3443,6 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) list_for_each_entry(fcport, &vha->vp_fcports, list) { if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) { fcport->scan_state = QLA_FCPORT_SCAN; - fcport->logout_on_delete = 0; } } goto login_logout; @@ -3532,10 +3538,22 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) } if (fcport->scan_state != QLA_FCPORT_FOUND) { + bool do_delete = false; + + if (fcport->scan_needed && + fcport->disc_state == DSC_LOGIN_PEND) { + /* Cable got disconnected after we sent + * a login. Do delete to prevent timeout. + */ + fcport->logout_on_delete = 1; + do_delete = true; + } + fcport->scan_needed = 0; - if ((qla_dual_mode_enabled(vha) || - qla_ini_mode_enabled(vha)) && - atomic_read(&fcport->state) == FCS_ONLINE) { + if (((qla_dual_mode_enabled(vha) || + qla_ini_mode_enabled(vha)) && + atomic_read(&fcport->state) == FCS_ONLINE) || + do_delete) { if (fcport->loop_id != FC_NO_LOOP_ID) { if (fcport->flags & FCF_FCP2_DEVICE) fcport->logout_on_delete = 0; @@ -3736,6 +3754,18 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) unsigned long flags; const char *name = sp->name; + if (res == QLA_OS_TIMER_EXPIRED) { + /* switch is ignoring all commands. + * This might be a zone disable behavior. + * This means we hit 64s timeout. + * 22s GPNFT + 44s Abort = 64s + */ + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: Switch Zone check please .\n", + name); + qla2x00_mark_all_devices_lost(vha); + } + /* * We are in an Interrupt context, queue up this * sp for GNNFT_DONE work. 
This will allow all diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 57a2d76aa691d6707c180f6e72a4ffe4b1ab63dd..0bd04a62af836dc113398a31835569aabca2b9f7 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -857,7 +857,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, fcport); break; } - /* fall through */ + fallthrough; default: if (fcport_is_smaller(fcport)) { /* local adapter is bigger */ @@ -4603,18 +4603,18 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) nv->firmware_options[1] = BIT_7 | BIT_5; nv->add_firmware_options[0] = BIT_5; nv->add_firmware_options[1] = BIT_5 | BIT_4; - nv->frame_payload_size = 2048; + nv->frame_payload_size = cpu_to_le16(2048); nv->special_options[1] = BIT_7; } else if (IS_QLA2200(ha)) { nv->firmware_options[0] = BIT_2 | BIT_1; nv->firmware_options[1] = BIT_7 | BIT_5; nv->add_firmware_options[0] = BIT_5; nv->add_firmware_options[1] = BIT_5 | BIT_4; - nv->frame_payload_size = 1024; + nv->frame_payload_size = cpu_to_le16(1024); } else if (IS_QLA2100(ha)) { nv->firmware_options[0] = BIT_3 | BIT_1; nv->firmware_options[1] = BIT_5; - nv->frame_payload_size = 1024; + nv->frame_payload_size = cpu_to_le16(1024); } nv->max_iocb_allocation = cpu_to_le16(256); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index e3d2dea0b057c0a997d8dba6e4589174e32aa501..0954fa41911caceb9a59e0d9a997501449ca5fad 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2874,7 +2874,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) &vha->dpc_flags); qla2xxx_wake_dpc(vha); } - /* fall through */ + fallthrough; default: ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 27bcd346af7c2530c0cb147d897e0bdf13d25bb8..25e0a1684763251da1cae29a66279a89aaea3d8d 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1580,11 +1580,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) qla2xxx_wake_dpc(vha); } } - /* fall through */ + fallthrough; case MBA_IDC_COMPLETE: if (ha->notify_lb_portup_comp && !vha->vp_idx) complete(&ha->lb_portup_comp); - /* Fallthru */ + fallthrough; case MBA_IDC_TIME_EXT: if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) @@ -2024,8 +2024,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, res = DID_ERROR << 16; } } - ql_dbg(ql_dbg_user, vha, 0x503f, - "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", + ql_dbg(ql_dbg_disc, vha, 0x503f, + "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", type, sp->handle, comp_status, fw_status[1], fw_status[2], le32_to_cpu(ese->total_byte_count)); goto els_ct_done; @@ -2188,7 +2188,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } - /* fall through */ + fallthrough; default: data[0] = MBS_COMMAND_ERROR; break; @@ -2368,7 +2368,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, case CS_PORT_UNAVAILABLE: case CS_PORT_LOGGED_OUT: fcport->nvme_flag |= NVME_FLAG_RESETTING; - /* fall through */ + fallthrough; case CS_ABORTED: case CS_PORT_BUSY: fd->transferred_length = 0; @@ -3485,7 +3485,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, } else 
{ qlt_24xx_process_atio_queue(vha, 1); } - /* fall through */ + fallthrough; case ABTS_RESP_24XX: case CTIO_TYPE7: case CTIO_CRC2: diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 73883435ab587b008446ce919702f48ed19e6da0..226f1428d3e52f24efc669f5a5b6f5b1165e5eb7 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -334,14 +334,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if (time_after(jiffies, wait_time)) break; - /* - * Check if it's UNLOADING, cause we cannot poll in - * this case, or else a NULL pointer dereference - * is triggered. - */ - if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) - return QLA_FUNCTION_TIMEOUT; - /* Check for pending interrupts. */ qla2x00_poll(ha->rsp_q_map[0]); @@ -5240,7 +5232,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) mcp->mb[8] = MSW(risc_addr); mcp->out_mb = MBX_8|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_2|MBX_0; - mcp->tov = 30; + mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { @@ -5428,7 +5420,7 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) mcp->mb[8] = MSW(risc_addr); mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; - mcp->tov = 30; + mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { @@ -5700,7 +5692,7 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; - mcp->tov = 30; + mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (mb != NULL) { @@ -5787,7 +5779,7 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_0; - mcp->tov = 30; + mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -5822,7 +5814,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_0; - mcp->tov = 30; + mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -6014,7 +6006,7 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) if (IS_QLA8031(ha)) mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; mcp->in_mb = MBX_0; - mcp->tov = 30; + mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -6050,7 +6042,7 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) mcp->in_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA8031(ha)) mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; - mcp->tov = 30; + mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index fa695a4007f869dab26186d0aec88051565ab049..90bbc61f361b9561ac18473ad28027721fb863f3 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -536,6 +536,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, struct nvme_private *priv = fd->private; struct qla_nvme_rport *qla_rport = rport->private; + if (!priv) { + /* nvme association has been torn down */ + return rval; + } + fcport = qla_rport->fcport; if (!qpair || !fcport || (qpair && !qpair->fw_started) || @@ -687,7 +692,15 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha) tmpl = &qla_nvme_fc_transport; WARN_ON(vha->nvme_local_port); - WARN_ON(ha->max_req_queues < 3); 
+ + if (ha->max_req_queues < 3) { + if (!ha->flags.max_req_queue_warned) + ql_log(ql_log_info, vha, 0x2120, + "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n", + __func__, ha->max_req_queues); + ha->flags.max_req_queue_warned = 1; + return ret; + } qla_nvme_fc_transport.max_hw_queues = min((uint8_t)(qla_nvme_fc_transport.max_hw_queues), diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 9b59f032a569bf31ac95ca0e6297d3049f03244a..8da00ba54aec6c7b070814f1d6dd10d5de7cbf68 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2017,6 +2017,11 @@ qla2x00_iospace_config(struct qla_hw_data *ha) /* Determine queue resources */ ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = QLA_BASE_VECTORS; + + /* Check if FW supports MQ or not */ + if (!(ha->fw_attributes & BIT_6)) + goto mqiobase_exit; + if (!ql2xmqsupport || !ql2xnvmeenable || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; @@ -2829,10 +2834,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* This may fail but that's ok */ pci_enable_pcie_error_reporting(pdev); - /* Turn off T10-DIF when FC-NVMe is enabled */ - if (ql2xnvmeenable) - ql2xenabledif = 0; - ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); if (!ha) { ql_log_pci(ql_log_fatal, pdev, 0x0009, diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index e161c05d7d826a8936c919d58d7909d7fd73e6b8..411b8a9ff393b425b254866f82f68e9f9e2b9d82 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -2457,7 +2457,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf, sec_mask = 0x10000; break; } - /* Fall through... */ + fallthrough; case 0x1f: /* Atmel flash. */ /* 512k sector size. */ @@ -2466,7 +2466,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf, sec_mask = 0x80000000; break; } - /* Fall through... */ + fallthrough; case 0x01: /* AMD flash. */ if (flash_id == 0x38 || flash_id == 0x40 || @@ -2499,7 +2499,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf, sec_mask = 0x1e000; break; } - /* fall through */ + fallthrough; default: /* Default to 16 kb sector size. 
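/*
 * The qla_nvme_register_hba() hunk above downgrades a WARN_ON() to a
 * graceful bail-out that logs at most once, remembering in a flag that the
 * warning was already printed.  A userspace sketch of the warn-once
 * pattern; the flag name mirrors the patch, the surrounding code is made up:
 */
#include <stdbool.h>
#include <stdio.h>

struct hba_flags {
	bool max_req_queue_warned;	/* set after the first warning */
};

static int nvme_register(struct hba_flags *flags, int max_req_queues)
{
	if (max_req_queues < 3) {
		if (!flags->max_req_queue_warned)
			fprintf(stderr,
				"Disabling FC-NVME due to lack of free queue pairs (%d).\n",
				max_req_queues);
		flags->max_req_queue_warned = true;	/* stay quiet next time */
		return -1;				/* feature stays off */
	}
	return 0;
}

int main(void)
{
	struct hba_flags f = { false };

	nvme_register(&f, 2);	/* warns once */
	nvme_register(&f, 2);	/* silent */
	return 0;
}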
*/ rest_addr = 0x3fff; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index fbb80a043b4fe0d102218e916febae8a434d1c3c..2d445bdb2129090a7aa0f0b688a21bc632de9d75 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -442,7 +442,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt, vha, 0xe073, "qla_target(%d):%s: CRC2 Response pkt\n", vha->vp_idx, __func__); - /* fall through */ + fallthrough; case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; @@ -1270,7 +1270,7 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) qla24xx_chk_fcp_state(sess); - ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, + ql_dbg(ql_dbg_disc, sess->vha, 0xe001, "Scheduling sess %p for deletion %8phC\n", sess, sess->port_name); @@ -4423,7 +4423,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, case QLA_TGT_CLEAR_TS: case QLA_TGT_ABORT_TS: abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); - /* fall through */ + fallthrough; case QLA_TGT_CLEAR_ACA: h = qlt_find_qphint(vha, mcmd->unpacked_lun); mcmd->qpair = h->qpair; @@ -5057,7 +5057,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, res = 1; break; } - /* fall through */ + fallthrough; case ELS_LOGO: case ELS_PRLO: spin_lock_irqsave(&ha->tgt.sess_lock, flags); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index bab87e47b238dbdb0c4a07e534238661140d84cd..676778cbc55099727422bb3730f35cb7d84dcf9f 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -2907,7 +2907,7 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, chap_tbl.secret_len); } } - /* fall through */ + fallthrough; default: return iscsi_session_get_param(cls_sess, param, buf); } diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 3790e8b70bba941e638ed9082687629168a54f59..48ff7d88af8604c2bee81c9b2c739ced57536fa2 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -200,15 +200,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f /* Write mailbox command registers. */ switch (mbox_param[param[0]] >> 4) { case 6: sbus_writew(param[5], qpti->qregs + MBOX5); - /* Fall through */ + fallthrough; case 5: sbus_writew(param[4], qpti->qregs + MBOX4); - /* Fall through */ + fallthrough; case 4: sbus_writew(param[3], qpti->qregs + MBOX3); - /* Fall through */ + fallthrough; case 3: sbus_writew(param[2], qpti->qregs + MBOX2); - /* Fall through */ + fallthrough; case 2: sbus_writew(param[1], qpti->qregs + MBOX1); - /* Fall through */ + fallthrough; case 1: sbus_writew(param[0], qpti->qregs + MBOX0); } @@ -259,15 +259,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f /* Read back output parameters. 
*/ switch (mbox_param[param[0]] & 0xf) { case 6: param[5] = sbus_readw(qpti->qregs + MBOX5); - /* Fall through */ + fallthrough; case 5: param[4] = sbus_readw(qpti->qregs + MBOX4); - /* Fall through */ + fallthrough; case 4: param[3] = sbus_readw(qpti->qregs + MBOX3); - /* Fall through */ + fallthrough; case 3: param[2] = sbus_readw(qpti->qregs + MBOX2); - /* Fall through */ + fallthrough; case 2: param[1] = sbus_readw(qpti->qregs + MBOX1); - /* Fall through */ + fallthrough; case 1: param[0] = sbus_readw(qpti->qregs + MBOX0); } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 064ed680c05309c64aeb0d7ae1881f8f0bcb7329..1ad7260d4758f4d0d2a232f6bb2561bc3a940a9b 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -4482,8 +4482,6 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) goto fini; } - if (zc == ZC2_IMPLICIT_OPEN) - zbc_close_zone(devip, zsp); zbc_open_zone(devip, zsp, true); fini: write_unlock(macc_lckp); @@ -5490,9 +5488,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, u64 d = ktime_get_boottime_ns() - ns_from_boot; if (kt <= d) { /* elapsed duration >= kt */ + spin_lock_irqsave(&sqp->qc_lock, iflags); sqcp->a_cmnd = NULL; atomic_dec(&devip->num_in_q); clear_bit(k, sqp->in_use_bm); + spin_unlock_irqrestore(&sqp->qc_lock, iflags); if (new_sd_dp) kfree(sd_dp); /* call scsi_done() from this thread */ diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 927b1e6418423fa6e08b82d75ff53b6b94dc1d0f..7d3571a2bd89bb802f4457c925f1acff21dc2fbb 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -599,7 +599,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd) set_host_byte(scmd, DID_ALLOC_FAILURE); return SUCCESS; } - /* FALLTHROUGH */ + fallthrough; case COPY_ABORTED: case VOLUME_OVERFLOW: case MISCOMPARE: @@ -621,7 +621,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd) return ADD_TO_MLQUEUE; else set_host_byte(scmd, DID_TARGET_FAILURE); - /* FALLTHROUGH */ + fallthrough; case ILLEGAL_REQUEST: if (sshdr.asc == 0x20 || /* Invalid command operation code */ @@ -734,7 +734,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) switch (status_byte(scmd->result)) { case GOOD: scsi_handle_queue_ramp_up(scmd->device); - /* FALLTHROUGH */ + fallthrough; case COMMAND_TERMINATED: return SUCCESS; case CHECK_CONDITION: @@ -755,7 +755,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) return FAILED; case QUEUE_FULL: scsi_handle_queue_full(scmd->device); - /* fall through */ + fallthrough; case BUSY: return NEEDS_RETRY; default: @@ -1302,7 +1302,7 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd) case NEEDS_RETRY: if (retry_cnt--) goto retry_tur; - /*FALLTHRU*/ + fallthrough; case SUCCESS: return 0; default: @@ -1739,7 +1739,7 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd) if (msg_byte(scmd->result) == COMMAND_COMPLETE && status_byte(scmd->result) == RESERVATION_CONFLICT) return 0; - /* fall through */ + fallthrough; case DID_SOFT_ERROR: return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER); } @@ -1810,7 +1810,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) set_host_byte(scmd, DID_TIME_OUT); return SUCCESS; } - /* FALLTHROUGH */ + fallthrough; case DID_NO_CONNECT: case DID_BAD_TARGET: /* @@ -1854,7 +1854,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) * lower down */ break; - /* fallthrough */ + fallthrough; case DID_BUS_BUSY: case DID_PARITY: goto maybe_retry; @@ -1892,7 +1892,7 @@ int 
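/*
 * The scsi_debug schedule_resp() hunk above moves the teardown of a
 * queued-command slot under qc_lock so a concurrent poller cannot observe
 * the slot half-freed.  A POSIX-threads sketch of the idea; the structure
 * and names are hypothetical:
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct queued_cmd {
	void *cmnd;		/* owner, NULL once the slot is free */
	bool in_use;
};

static pthread_mutex_t qc_lock = PTHREAD_MUTEX_INITIALIZER;

static void release_slot(struct queued_cmd *qc)
{
	/* both fields must change under the same lock that readers take */
	pthread_mutex_lock(&qc_lock);
	qc->cmnd = NULL;
	qc->in_use = false;
	pthread_mutex_unlock(&qc_lock);
}

int main(void)
{
	struct queued_cmd qc = { .cmnd = &qc, .in_use = true };

	release_slot(&qc);
	return 0;
}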
scsi_decide_disposition(struct scsi_cmnd *scmd) * the case of trying to send too many commands to a * tagged queueing device. */ - /* FALLTHROUGH */ + fallthrough; case BUSY: /* * device can't talk to us at the moment. Should only @@ -1905,7 +1905,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) if (scmd->cmnd[0] == REPORT_LUNS) scmd->device->sdev_target->expecting_lun_change = 0; scsi_handle_queue_ramp_up(scmd->device); - /* FALLTHROUGH */ + fallthrough; case COMMAND_TERMINATED: return SUCCESS; case TASK_ABORTED: @@ -2376,22 +2376,22 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg) rtn = scsi_try_bus_device_reset(scmd); if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) break; - /* FALLTHROUGH */ + fallthrough; case SG_SCSI_RESET_TARGET: rtn = scsi_try_target_reset(scmd); if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) break; - /* FALLTHROUGH */ + fallthrough; case SG_SCSI_RESET_BUS: rtn = scsi_try_bus_reset(scmd); if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) break; - /* FALLTHROUGH */ + fallthrough; case SG_SCSI_RESET_HOST: rtn = scsi_try_host_reset(scmd); if (rtn == SUCCESS) break; - /* FALLTHROUGH */ + fallthrough; default: rtn = FAILED; break; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 45d04b7b26439896b3caa4654222d77255994750..14872c9dc78cc6738a29043c8781de2952d23a75 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -117,14 +117,14 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, case NOT_READY: /* This happens if there is no disc in drive */ if (sdev->removable) break; - /* FALLTHROUGH */ + fallthrough; case UNIT_ATTENTION: if (sdev->removable) { sdev->changed = 1; result = 0; /* This is no longer considered an error */ break; } - /* FALLTHROUGH -- for non-removable media */ + fallthrough; /* for non-removable media */ default: sdev_printk(KERN_INFO, sdev, "ioctl_internal_command return code = %x\n", diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 7c6dd6f751907bf961334d7fae0f4d41d5e7bdad..7affaaf8b98e0c8cbc819e69bc017d1be8b29822 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -795,7 +795,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) } if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req))) return; - /*FALLTHRU*/ + fallthrough; case ACTION_REPREP: scsi_io_completion_reprep(cmd, q); break; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index bd38c8cea56e147a48fe7571cf1ee9b1f97be681..ca1e6cf6a38ef61df1417dbe51c31e049c4943e8 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -516,7 +516,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, break; case BMIC_SENSE_DIAG_OPTIONS: cdb_length = 0; - /* fall through */ + fallthrough; case BMIC_IDENTIFY_CONTROLLER: case BMIC_IDENTIFY_PHYSICAL_DEVICE: case BMIC_SENSE_SUBSYSTEM_INFORMATION: @@ -527,7 +527,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, break; case BMIC_SET_DIAG_OPTIONS: cdb_length = 0; - /* fall through */ + fallthrough; case BMIC_WRITE_HOST_WELLNESS: request->data_direction = SOP_WRITE_FLAG; cdb[0] = BMIC_WRITE; @@ -2324,7 +2324,7 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, switch (scmd->cmnd[0]) { case WRITE_6: is_write = true; - /* fall through */ + fallthrough; case READ_6: first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | (scmd->cmnd[2] << 8) | 
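/*
 * scsi_ioctl_reset() above uses deliberate fall-through as an escalation
 * ladder - a failed reset at one scope drops into the next, wider one.  A
 * compilable sketch of that shape; the levels and outcomes are invented,
 * and the fallthrough macro assumes GCC/Clang attribute support:
 */
#include <stdio.h>

#define fallthrough __attribute__((__fallthrough__))

enum scope { RESET_DEVICE, RESET_TARGET, RESET_BUS, RESET_HOST };

static int try_reset(enum scope s)
{
	return s == RESET_BUS;	/* pretend only the bus reset works */
}

static int escalate(enum scope start)
{
	switch (start) {
	case RESET_DEVICE:
		if (try_reset(RESET_DEVICE))
			break;
		fallthrough;	/* escalate */
	case RESET_TARGET:
		if (try_reset(RESET_TARGET))
			break;
		fallthrough;
	case RESET_BUS:
		if (try_reset(RESET_BUS))
			break;
		fallthrough;
	case RESET_HOST:
		if (try_reset(RESET_HOST))
			break;
		return -1;	/* every scope failed */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", escalate(RESET_DEVICE));	/* 0: bus reset succeeded */
	return 0;
}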
scmd->cmnd[3]); @@ -2334,21 +2334,21 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, break; case WRITE_10: is_write = true; - /* fall through */ + fallthrough; case READ_10: first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); break; case WRITE_12: is_write = true; - /* fall through */ + fallthrough; case READ_12: first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); block_cnt = get_unaligned_be32(&scmd->cmnd[6]); break; case WRITE_16: is_write = true; - /* fall through */ + fallthrough; case READ_16: first_block = get_unaligned_be64(&scmd->cmnd[2]); block_cnt = get_unaligned_be32(&scmd->cmnd[10]); @@ -2948,7 +2948,7 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: if (io_request->scmd) io_request->scmd->result = 0; - /* fall through */ + fallthrough; case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: break; case PQI_RESPONSE_IU_VENDOR_GENERAL: @@ -3115,12 +3115,11 @@ static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info, switch (reset_status) { case RESET_INITIATE_DRIVER: - /* fall through */ case RESET_TIMEDOUT: dev_info(&ctrl_info->pci_dev->dev, "resetting controller %u\n", ctrl_info->ctrl_id); sis_soft_reset(ctrl_info); - /* fall through */ + fallthrough; case RESET_INITIATE_FIRMWARE: rc = pqi_ofa_ctrl_restart(ctrl_info); pqi_ofa_free_host_buffer(ctrl_info); diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 0c4aa4665a2f9d2a6fa9db565a220f01322c5ec1..3b3a53c6a0de530ba6eef5014d3812a89eac4015 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -877,10 +877,10 @@ static void get_sectorsize(struct scsi_cd *cd) case 2340: case 2352: sector_size = 2048; - /* fall through */ + fallthrough; case 2048: cd->capacity *= 4; - /* fall through */ + fallthrough; case 512: break; default: diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 87fbc0ea350b13bcf502ffb12350545a588c585e..e2e5356a997de58051279afa09fac75f984d32ca 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -339,14 +339,14 @@ static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s) switch (sense[0] & 0x7f) { case 0x71: s->deferred = 1; - /* fall through */ + fallthrough; case 0x70: s->fixed_format = 1; s->flags = sense[2] & 0xe0; break; case 0x73: s->deferred = 1; - /* fall through */ + fallthrough; case 0x72: s->fixed_format = 0; ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4); @@ -2723,7 +2723,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon switch (cmd_in) { case MTFSFM: chg_eof = 0; /* Changed from the FSF after this */ - /* fall through */ + fallthrough; case MTFSF: cmd[0] = SPACE; cmd[1] = 0x01; /* Space FileMarks */ @@ -2738,7 +2738,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon break; case MTBSFM: chg_eof = 0; /* Changed from the FSF after this */ - /* fall through */ + fallthrough; case MTBSF: cmd[0] = SPACE; cmd[1] = 0x01; /* Space FileMarks */ diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 701b842296f05259a47f6543b1432debbcf42cef..2e3fbc2fae976bfbcfd77878bef47923ff709f7d 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -397,12 +397,12 @@ static int sun3scsi_dma_finish(int write_flag) case CSR_LEFT_3: *vaddr = (dregs->bpack_lo & 0xff00) >> 8; vaddr--; - /* Fall through */ + fallthrough; case CSR_LEFT_2: *vaddr = (dregs->bpack_hi & 0x00ff); vaddr--; - /* Fall through */ + fallthrough; case 
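/*
 * The smartpqi hunks above share one LBA/length decode between WRITE_x and
 * READ_x by setting is_write and falling through.  SCSI CDB fields are
 * big-endian; a self-contained sketch of the READ(10) decode, with local
 * helpers standing in for the kernel's get_unaligned_be32()/be16():
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	/* READ(10): opcode 0x28, LBA in bytes 2-5, block count in bytes 7-8 */
	const uint8_t cdb[10] = { 0x28, 0, 0x00, 0x00, 0x12, 0x34, 0, 0x00, 0x08, 0 };

	printf("lba=%u blocks=%u\n", get_be32(&cdb[2]), get_be16(&cdb[7]));
	return 0;	/* prints lba=4660 blocks=8 */
}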
CSR_LEFT_1: *vaddr = (dregs->bpack_hi & 0xff00) >> 8; diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c index 6d7651a7847e82704111e121febb897f96f63794..c6db61b61de3a5b9b9ede1dad9ed112591cd5e3d 100644 --- a/drivers/scsi/sym53c8xx_2/sym_fw.c +++ b/drivers/scsi/sym53c8xx_2/sym_fw.c @@ -523,7 +523,7 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len) new = old; break; } - /* fall through */ + fallthrough; default: new = 0; panic("sym_fw_bind_script: " diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 8410117d5aa4413285c8d47fe9584a66127144eb..cc11daa1222b160f73afeab1b84f1838cd112bc5 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -3059,7 +3059,7 @@ static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb sym_print_addr(cp->cmd, "%s\n", s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); } - /* fall through */ + fallthrough; default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ sym_complete_error (np, cp); break; @@ -4620,7 +4620,7 @@ static void sym_int_sir(struct sym_hcb *np) * Negotiation failed. * Target does not want answer message. */ - /* fall through */ + fallthrough; case SIR_NEGO_PROTO: sym_nego_default(np, tp, cp); goto out; diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c index d37e2a69136ad62a54853df2da959e3ddba34d9c..e13d5351f155f3ae01783d2e8ca49560ab9af03e 100644 --- a/drivers/scsi/sym53c8xx_2/sym_nvram.c +++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c @@ -695,7 +695,7 @@ static int sym_read_Tekram_nvram (struct sym_device *np, Tekram_nvram *nvram) data, len); if (!x) break; - /* fall through */ + fallthrough; default: x = sym_read_T93C46_nvram(np, nvram); break; diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c index 46bb905b4d6a90931f5aeaaa080d698448c56f07..eafe0db98d5423de9d064aff54841859d50f1f44 100644 --- a/drivers/scsi/ufs/ti-j721e-ufs.c +++ b/drivers/scsi/ufs/ti-j721e-ufs.c @@ -38,6 +38,7 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev) /* Select MPHY refclk frequency */ clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) { + ret = PTR_ERR(clk); dev_err(dev, "Cannot claim MPHY clock.\n"); goto clk_err; } diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index 29cd017c1aa066e77b0e3f884c72e4f6c5967ab4..1755dd6b04aece71d447f36721616873376bd6e9 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -212,7 +212,7 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state, ktime_t timeout, time_checked; u32 val; - timeout = ktime_add_us(ktime_get(), ms_to_ktime(max_wait_ms)); + timeout = ktime_add_ms(ktime_get(), max_wait_ms); do { time_checked = ktime_get(); ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c index bcfbbd0d5c4560af3abb0007cf5b5eb969cdd0de..5b2bc1a6f9226c2b1cfd925b9a6d83d014f9f839 100644 --- a/drivers/scsi/ufs/ufs_bsg.c +++ b/drivers/scsi/ufs/ufs_bsg.c @@ -110,7 +110,7 @@ static int ufs_bsg_request(struct bsg_job *job) goto out; } - /* fall through */ + fallthrough; case UPIU_TRANSACTION_NOP_OUT: case UPIU_TRANSACTION_TASK_REQ: ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req, diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index f407b13883acbcb097045009523e9ca1bcec858d..5a95a7bfbab0c1bc98c49da33f5efb62ac77d4b3 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ 
b/drivers/scsi/ufs/ufshcd-pci.c @@ -44,11 +44,23 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba, return err; } +static int ufs_intel_ehl_init(struct ufs_hba *hba) +{ + hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; + return 0; +} + static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { .name = "intel-pci", .link_startup_notify = ufs_intel_link_startup_notify, }; +static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = { + .name = "intel-pci", + .init = ufs_intel_ehl_init, + .link_startup_notify = ufs_intel_link_startup_notify, +}; + #ifdef CONFIG_PM_SLEEP /** * ufshcd_pci_suspend - suspend power management function @@ -177,8 +189,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = { static const struct pci_device_id ufshcd_pci_tbl[] = { { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, - { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, - { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops }, { } /* terminate list */ }; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 30762228423981253c7cf4908feca8d99d51543b..1d157ff58d81721387d25a31a52af5f0bd96ad8f 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1561,6 +1561,7 @@ static void ufshcd_ungate_work(struct work_struct *work) int ufshcd_hold(struct ufs_hba *hba, bool async) { int rc = 0; + bool flush_result; unsigned long flags; if (!ufshcd_is_clkgating_allowed(hba)) @@ -1592,7 +1593,9 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) break; } spin_unlock_irqrestore(hba->host->host_lock, flags); - flush_work(&hba->clk_gating.ungate_work); + flush_result = flush_work(&hba->clk_gating.ungate_work); + if (hba->clk_gating.is_suspended && !flush_result) + goto out; spin_lock_irqsave(hba->host->host_lock, flags); goto start; } @@ -1609,7 +1612,7 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) * currently running. Hence, fall through to cancel gating * work and to enable clocks. */ - /* fallthrough */ + fallthrough; case CLKS_OFF: ufshcd_scsi_block_requests(hba); hba->clk_gating.state = REQ_CLKS_ON; @@ -1621,7 +1624,7 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) * fall through to check if we should wait for this * work to be done or not. */ - /* fallthrough */ + fallthrough; case REQ_CLKS_ON: if (async) { rc = -EAGAIN; @@ -4734,7 +4737,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) switch (scsi_status) { case SAM_STAT_CHECK_CONDITION: ufshcd_copy_sense_data(lrbp); - /* fallthrough */ + fallthrough; case SAM_STAT_GOOD: result |= DID_OK << 16 | COMMAND_COMPLETE << 8 | @@ -5941,7 +5944,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) */ static irqreturn_t ufshcd_intr(int irq, void *__hba) { - u32 intr_status, enabled_intr_status; + u32 intr_status, enabled_intr_status = 0; irqreturn_t retval = IRQ_NONE; struct ufs_hba *hba = __hba; int retries = hba->nutrs; @@ -5955,7 +5958,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) * read, make sure we handle them by checking the interrupt status * again in a loop until we process all of the reqs before returning. 
*/ - do { + while (intr_status && retries--) { enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); if (intr_status) @@ -5964,9 +5967,9 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) retval |= ufshcd_sl_intr(hba, enabled_intr_status); intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); - } while (intr_status && --retries); + } - if (retval == IRQ_NONE) { + if (enabled_intr_status && retval == IRQ_NONE) { dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n", __func__, intr_status); ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); @@ -6274,7 +6277,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, switch (msgcode) { case UPIU_TRANSACTION_NOP_OUT: cmd_type = DEV_CMD_TYPE_NOP; - /* fall through */ + fallthrough; case UPIU_TRANSACTION_QUERY_REQ: ufshcd_hold(hba, false); mutex_lock(&hba->dev_cmd.lock); @@ -6434,14 +6437,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) goto out; } - if (!(reg & (1 << tag))) { - dev_err(hba->dev, - "%s: cmd was completed, but without a notifying intr, tag = %d", - __func__, tag); - } - /* Print Transfer Request of aborted task */ - dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); + dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); /* * Print detailed info about aborted request. @@ -6462,6 +6459,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) } hba->req_abort_count++; + if (!(reg & (1 << tag))) { + dev_err(hba->dev, + "%s: cmd was completed, but without a notifying intr, tag = %d", + __func__, tag); + goto cleanup; + } + /* Skip task abort in case previous aborts failed and report failure */ if (lrbp->req_abort_skip) { err = -EIO; @@ -6492,7 +6496,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) /* command completed already */ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", __func__, tag); - goto out; + goto cleanup; } else { dev_err(hba->dev, "%s: no response from device. tag = %d, err %d\n", @@ -6526,6 +6530,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) goto out; } +cleanup: scsi_dma_unmap(cmd); spin_lock_irqsave(host->host_lock, flags); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index b2ef18f1b7467711e244ee1523d0d5a072190904..363589c0bd370425010d3981a80d0324d7c700db 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -520,6 +520,12 @@ enum ufshcd_quirks { * OCS FATAL ERROR with device error through sense data */ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10, + + /* + * This quirk needs to be enabled if the host controller has + * auto-hibernate capability but it doesn't work. 
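/*
 * The ufshcd_intr() rework above turns a do/while into a while loop, so a
 * call with nothing pending never executes the body, and it complains only
 * when an *enabled* interrupt source actually went unhandled.  A
 * self-contained model of that bounded drain loop against a fake status
 * register (all values invented):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_status = 0x5;		/* pretend two sources fired */
static const uint32_t enabled_mask = 0x1;	/* only one of them enabled */

static uint32_t read_status(void) { return fake_status; }
static void ack_status(uint32_t bits) { fake_status &= ~bits; }
static int handle(uint32_t bits) { return bits != 0; }	/* 1 == handled */

int main(void)
{
	uint32_t intr_status = read_status();
	uint32_t enabled = 0;
	int retries = 8, handled = 0;

	while (intr_status && retries--) {
		enabled = intr_status & enabled_mask;
		ack_status(intr_status);	/* clear what we saw */
		handled |= handle(enabled);
		intr_status = read_status();	/* re-read: more may have fired */
	}

	if (enabled && !handled)
		fprintf(stderr, "unhandled interrupt 0x%x\n", enabled);
	return 0;
}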
+ */ + UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11, }; enum ufshcd_caps { @@ -803,7 +809,8 @@ return true; static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba) { - return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT); + return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) && + !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8); } static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba) diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index ca1c39b6f631f8ce53ca5bba2d7ae985eb398aea..3b18034320903437605d988e8f05838c88fd8091 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -148,7 +148,7 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) default: scmd_printk(KERN_WARNING, sc, "Unknown response %d", resp->response); - /* fall through */ + fallthrough; case VIRTIO_SCSI_S_FAILURE: set_host_byte(sc, DID_ERROR); break; diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 8dbb4db6831afbe0bc3e65c67a69763f76990999..081f54ab7d86cb2a076cc71284a76e8c134145e5 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -607,7 +607,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, case BTSTAT_TAGREJECT: case BTSTAT_BADMSG: cmd->result = (DRIVER_INVALID << 24); - /* fall through */ + fallthrough; case BTSTAT_HAHARDWARE: case BTSTAT_INVPHASE: diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index f81046f0e68a69bea44e2f311a3d1f401b97a02c..87dafbc942d37ee0195d2fff8e01aa87ca020769 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c @@ -1854,7 +1854,7 @@ round_4(unsigned int x) case 1: --x; break; case 2: ++x; - /* fall through */ + fallthrough; case 3: ++x; } return x; diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index f0068e96a177fc0b48390d826638181983fdb248..259fc248d06cffa7278592d79cff2b6207987bda 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -1111,7 +1111,7 @@ static void scsifront_backend_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* fall through - Missed the backend's Closing state */ + fallthrough; /* Missed the backend's Closing state */ case XenbusStateClosing: scsifront_disconnect(info); break; diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index e19102f463027a6b1c93760500ecad9aed411cfc..b25d0f7dac9e8ad38914d445ec12f83f4ecb79ab 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -353,7 +353,7 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, debugfs_create_u32("nmodem_supported", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.nmodem_supported); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 14): qcom_socinfo->info.num_clusters = __le32_to_cpu(info->num_clusters); qcom_socinfo->info.ncluster_array_offset = __le32_to_cpu(info->ncluster_array_offset); @@ -368,14 +368,14 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, &qcom_socinfo->info.num_defective_parts); debugfs_create_u32("ndefective_parts_array_offset", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.ndefective_parts_array_offset); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 13): qcom_socinfo->info.nproduct_id = __le32_to_cpu(info->nproduct_id); debugfs_create_u32("nproduct_id", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.nproduct_id); DEBUGFS_ADD(info, chip_id); - /* Fall through */ + fallthrough; case 
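/*
 * The ufshcd.h hunk above gates a hardware capability bit on a
 * per-platform "broken" quirk, so a controller that advertises
 * auto-hibernate but cannot use it reports the feature as absent.  Minimal
 * sketch of the pattern; the quirk bit matches the patch, the capability
 * bit is made up:
 */
#include <stdbool.h>
#include <stdio.h>

#define CAP_AUTO_H8		(1u << 3)	/* hypothetical capability bit */
#define QUIRK_BROKEN_AUTO_H8	(1u << 11)	/* quirk bit from the patch */

struct hba { unsigned int capabilities, quirks; };

static bool auto_h8_supported(const struct hba *hba)
{
	return (hba->capabilities & CAP_AUTO_H8) &&
	       !(hba->quirks & QUIRK_BROKEN_AUTO_H8);
}

int main(void)
{
	struct hba h = {
		.capabilities = CAP_AUTO_H8,
		.quirks = QUIRK_BROKEN_AUTO_H8,
	};

	printf("%d\n", auto_h8_supported(&h));	/* prints 0: the quirk wins */
	return 0;
}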
SOCINFO_VERSION(0, 12): qcom_socinfo->info.chip_family = __le32_to_cpu(info->chip_family); @@ -392,7 +392,7 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, debugfs_create_x32("raw_device_number", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.raw_device_num); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 11): case SOCINFO_VERSION(0, 10): case SOCINFO_VERSION(0, 9): @@ -400,12 +400,12 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, debugfs_create_u32("foundry_id", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.foundry_id); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 8): case SOCINFO_VERSION(0, 7): DEBUGFS_ADD(info, pmic_model); DEBUGFS_ADD(info, pmic_die_rev); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 6): qcom_socinfo->info.hw_plat_subtype = __le32_to_cpu(info->hw_plat_subtype); @@ -413,7 +413,7 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, debugfs_create_u32("hardware_platform_subtype", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.hw_plat_subtype); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 5): qcom_socinfo->info.accessory_chip = __le32_to_cpu(info->accessory_chip); @@ -421,27 +421,27 @@ static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, debugfs_create_u32("accessory_chip", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.accessory_chip); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 4): qcom_socinfo->info.plat_ver = __le32_to_cpu(info->plat_ver); debugfs_create_u32("platform_version", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.plat_ver); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 3): qcom_socinfo->info.hw_plat = __le32_to_cpu(info->hw_plat); debugfs_create_u32("hardware_platform", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.hw_plat); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 2): qcom_socinfo->info.raw_ver = __le32_to_cpu(info->raw_ver); debugfs_create_u32("raw_version", 0400, qcom_socinfo->dbg_root, &qcom_socinfo->info.raw_ver); - /* Fall through */ + fallthrough; case SOCINFO_VERSION(0, 1): DEBUGFS_ADD(info, build_id); break; diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 42cf37a0556b30946e7d2f677c98926a7449fdef..d332e5d9abac49001e1d2acbcaafa02865124300 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -2229,7 +2229,7 @@ static int tegra_pmc_clk_notify_cb(struct notifier_block *nb, case POST_RATE_CHANGE: pmc->rate = data->new_rate; - /* fall through */ + fallthrough; case ABORT_RATE_CHANGE: mutex_unlock(&pmc->powergates_lock); diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index e6e0fb9a81b4c00209fc40fdfa9d3764c59ce6cd..da0201693c24d591fedf76a2b9888fe16b0429d0 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -1372,7 +1372,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave) return ret; } - /* Read Instat 1, Instat 2 and Instat 3 registers */ + /* Read Intstat 1, Intstat 2 and Intstat 3 registers */ ret = sdw_read(slave, SDW_SCP_INT1); if (ret < 0) { dev_err(slave->bus->dev, diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index 37290a799023cb379f06c191d03057e7a8d9409c..6e36deb505b1e1e76013ccc83e9e93e0817be4df 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -717,6 +717,7 @@ static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count) kfree(wbuf); error_1: kfree(wr_msg); + bus->defer_msg.msg = NULL; return ret; } @@ -840,9 
+841,10 @@ static int do_bank_switch(struct sdw_stream_runtime *stream) error: list_for_each_entry(m_rt, &stream->master_list, stream_node) { bus = m_rt->bus; - - kfree(bus->defer_msg.msg->buf); - kfree(bus->defer_msg.msg); + if (bus->defer_msg.msg) { + kfree(bus->defer_msg.msg->buf); + kfree(bus->defer_msg.msg); + } } msg_unlock: diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index c3008e423f59e36a9e3d1ad3fd9ef3348bccd214..c6ea760ea5f04e4cd5056ad210c3e68d85ad37fd 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -1017,4 +1017,7 @@ config SPI_SLAVE_SYSTEM_CONTROL endif # SPI_SLAVE +config SPI_DYNAMIC + def_bool ACPI || OF_DYNAMIC || SPI_SLAVE + endif # SPI diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index 2f717812c76617d4f989c8082d0a13ebca1e1a07..03b034c15d2beb32d1ec2c450bfa7e8286b13053 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c @@ -164,10 +164,10 @@ static inline void bcm2835aux_rd_fifo(struct bcm2835aux_spi *bs) switch (count) { case 3: *bs->rx_buf++ = (data >> 16) & 0xff; - /* fallthrough */ + fallthrough; case 2: *bs->rx_buf++ = (data >> 8) & 0xff; - /* fallthrough */ + fallthrough; case 1: *bs->rx_buf++ = (data >> 0) & 0xff; /* fallthrough - no default */ diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 1c1a9d17eec0d4af3bcd11132cfb7a87435fccfd..c6795c684b16a61d0c3ba61bb1f924f898a222a9 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -907,14 +907,16 @@ static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata, struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; dma_addr_t dma_dst; + struct device *ddev; if (!cqspi->rx_chan || !virt_addr_valid(buf)) { memcpy_fromio(buf, cqspi->ahb_base + from, len); return 0; } - dma_dst = dma_map_single(dev, buf, len, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, dma_dst)) { + ddev = cqspi->rx_chan->device->dev; + dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE); + if (dma_mapping_error(ddev, dma_dst)) { dev_err(dev, "dma mapping failed\n"); return -ENOMEM; } @@ -948,7 +950,7 @@ static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata, } err_unmap: - dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE); + dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE); return ret; } @@ -1128,8 +1130,17 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi) return 0; } +static const char *cqspi_get_name(struct spi_mem *mem) +{ + struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master); + struct device *dev = &cqspi->pdev->dev; + + return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), mem->spi->chip_select); +} + static const struct spi_controller_mem_ops cqspi_mem_ops = { .exec_op = cqspi_exec_mem_op, + .get_name = cqspi_get_name, }; static int cqspi_setup_flash(struct cqspi_st *cqspi) diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c index 54ad0ac121e5bd12f3dde92b734ea42f085a7f81..ee905880769e6c86362c8bbdd5edcce3c0d324ad 100644 --- a/drivers/spi/spi-fsl-cpm.c +++ b/drivers/spi/spi-fsl-cpm.c @@ -226,7 +226,7 @@ static void fsl_spi_free_dummy_rx(void) case 1: kfree(fsl_dummy_rx); fsl_dummy_rx = NULL; - /* fall through */ + fallthrough; default: fsl_dummy_rx_refcnt--; break; @@ -294,7 +294,7 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) switch (mspi->subblock) { default: dev_warn(dev, "cell-index unspecified, assuming SPI1\n"); - /* fall through */ + fallthrough; case 0: mspi->subblock = QE_CR_SUBBLOCK_SPI1; break; 
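/*
 * The soundwire hunks above clear defer_msg.msg on the failure path and
 * test it before freeing in do_bank_switch(), so the shared cleanup cannot
 * double-free a message one bus already released.  A userspace sketch of
 * the free-and-NULL idiom (the structure here is hypothetical):
 */
#include <stdlib.h>

struct defer_msg { void *buf; };

static void release_msg(struct defer_msg **msgp)
{
	struct defer_msg *msg = *msgp;

	if (!msg)
		return;		/* already released: calling again is safe */
	free(msg->buf);
	free(msg);
	*msgp = NULL;		/* second call becomes a no-op, not a double free */
}

int main(void)
{
	struct defer_msg *m = calloc(1, sizeof(*m));

	if (!m)
		return 1;
	m->buf = malloc(16);
	release_msg(&m);
	release_msg(&m);	/* harmless */
	return 0;
}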
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c index 9522d1b5786d5c9e5fc49d6de14027c312ecdc1d..df981e55c24c99b618fa4a0e2849bfaab6b88ebc 100644 --- a/drivers/spi/spi-loopback-test.c +++ b/drivers/spi/spi-loopback-test.c @@ -90,7 +90,7 @@ static struct spi_test spi_tests[] = { { .description = "tx/rx-transfer - crossing PAGE_SIZE", .fill_option = FILL_COUNT_8, - .iterate_len = { ITERATE_MAX_LEN }, + .iterate_len = { ITERATE_LEN }, .iterate_tx_align = ITERATE_ALIGN, .iterate_rx_align = ITERATE_ALIGN, .transfer_count = 1, diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c index bd23c4689b46317209b6bc8dbd30c00fe85acf90..127b8bd258312770a1ba15a60d4b5d64f279d171 100644 --- a/drivers/spi/spi-sprd-adi.c +++ b/drivers/spi/spi-sprd-adi.c @@ -506,7 +506,7 @@ static int sprd_adi_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "failed to find hwlock id, %d\n", ret); - /* fall-through */ + fallthrough; case -EPROBE_DEFER: goto put_ctlr; } diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 4c643dfc7fbbcc500fb301df5956d0b220e966a2..3056428b09f314c128226664c0a5cd55244739f8 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -441,7 +442,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, { u32 div, mbrdiv; - div = DIV_ROUND_UP(spi->clk_rate, speed_hz); + /* Ensure spi->clk_rate is even */ + div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz); /* * SPI framework set xfer->speed_hz to master->max_speed_hz if @@ -467,20 +469,27 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, /** * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level * @spi: pointer to the spi controller data structure + * @xfer_len: length of the message to be transferred */ -static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi) +static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len) { - u32 fthlv, half_fifo; + u32 fthlv, half_fifo, packet; /* data packet should not exceed 1/2 of fifo space */ half_fifo = (spi->fifo_size / 2); + /* data_packet should not exceed transfer length */ + if (half_fifo > xfer_len) + packet = xfer_len; + else + packet = half_fifo; + if (spi->cur_bpw <= 8) - fthlv = half_fifo; + fthlv = packet; else if (spi->cur_bpw <= 16) - fthlv = half_fifo / 2; + fthlv = packet / 2; else - fthlv = half_fifo / 4; + fthlv = packet / 4; /* align packet size with data registers access */ if (spi->cur_bpw > 8) @@ -488,6 +497,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi) else fthlv -= (fthlv % 4); /* multiple of 4 */ + if (!fthlv) + fthlv = 1; + return fthlv; } @@ -924,7 +936,11 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) } if (sr & STM32H7_SPI_SR_SUSP) { - dev_warn(spi->dev, "Communication suspended\n"); + static DEFINE_RATELIMIT_STATE(rs, + DEFAULT_RATELIMIT_INTERVAL * 10, + 1); + if (__ratelimit(&rs)) + dev_dbg_ratelimited(spi->dev, "Communication suspended\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) stm32h7_spi_read_rxfifo(spi, false); /* @@ -966,13 +982,13 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) stm32h7_spi_read_rxfifo(spi, false); - writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR); + writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR); spin_unlock_irqrestore(&spi->lock, flags); if (end) { - 
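/*
 * The stm32h7_spi_prepare_fthlv() change above caps the data packet at
 * both half the FIFO and the transfer length, converts bytes to words,
 * aligns to the register access width, and never lets the threshold reach
 * zero (a zero threshold would never raise the FIFO event).  A
 * pure-function sketch of that arithmetic with invented sizes:
 */
#include <assert.h>

static unsigned int prepare_fthlv(unsigned int fifo_size, unsigned int bpw,
				  unsigned int xfer_len)
{
	unsigned int half_fifo = fifo_size / 2;
	unsigned int packet = (half_fifo > xfer_len) ? xfer_len : half_fifo;
	unsigned int fthlv;

	if (bpw <= 8)
		fthlv = packet;		/* one byte per word */
	else if (bpw <= 16)
		fthlv = packet / 2;
	else
		fthlv = packet / 4;

	if (bpw > 8)
		fthlv -= fthlv % 2;	/* align with data register access */
	else
		fthlv -= fthlv % 4;

	return fthlv ? fthlv : 1;	/* threshold must stay non-zero */
}

int main(void)
{
	assert(prepare_fthlv(16, 8, 3) == 1);	/* short xfer: 3 - 3%4 = 0, clamped */
	assert(prepare_fthlv(16, 8, 100) == 8);	/* long xfer: half the FIFO */
	assert(prepare_fthlv(16, 16, 100) == 4);
	return 0;
}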
spi_finalize_current_transfer(master); stm32h7_spi_disable(spi); + spi_finalize_current_transfer(master); } return IRQ_HANDLED; @@ -1393,7 +1409,7 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi) cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) & STM32H7_SPI_CFG1_DSIZE; - spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi); + spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen); fthlv = spi->cur_fthlv - 1; cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV; @@ -1585,39 +1601,33 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, unsigned long flags; unsigned int comm_type; int nb_words, ret = 0; + int mbr; spin_lock_irqsave(&spi->lock, flags); - if (spi->cur_bpw != transfer->bits_per_word) { - spi->cur_bpw = transfer->bits_per_word; - spi->cfg->set_bpw(spi); - } + spi->cur_xferlen = transfer->len; - if (spi->cur_speed != transfer->speed_hz) { - int mbr; + spi->cur_bpw = transfer->bits_per_word; + spi->cfg->set_bpw(spi); - /* Update spi->cur_speed with real clock speed */ - mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz, - spi->cfg->baud_rate_div_min, - spi->cfg->baud_rate_div_max); - if (mbr < 0) { - ret = mbr; - goto out; - } - - transfer->speed_hz = spi->cur_speed; - stm32_spi_set_mbr(spi, mbr); + /* Update spi->cur_speed with real clock speed */ + mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz, + spi->cfg->baud_rate_div_min, + spi->cfg->baud_rate_div_max); + if (mbr < 0) { + ret = mbr; + goto out; } - comm_type = stm32_spi_communication_type(spi_dev, transfer); - if (spi->cur_comm != comm_type) { - ret = spi->cfg->set_mode(spi, comm_type); + transfer->speed_hz = spi->cur_speed; + stm32_spi_set_mbr(spi, mbr); - if (ret < 0) - goto out; + comm_type = stm32_spi_communication_type(spi_dev, transfer); + ret = spi->cfg->set_mode(spi, comm_type); + if (ret < 0) + goto out; - spi->cur_comm = comm_type; - } + spi->cur_comm = comm_type; if (spi->cfg->set_data_idleness) spi->cfg->set_data_idleness(spi, transfer->len); @@ -1635,8 +1645,6 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, goto out; } - spi->cur_xferlen = transfer->len; - dev_dbg(spi->dev, "transfer communication mode set to %d\n", spi->cur_comm); dev_dbg(spi->dev, @@ -1996,6 +2004,8 @@ static int stm32_spi_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); + pinctrl_pm_select_sleep_state(&pdev->dev); + return 0; } @@ -2007,13 +2017,18 @@ static int stm32_spi_runtime_suspend(struct device *dev) clk_disable_unprepare(spi->clk); - return 0; + return pinctrl_pm_select_sleep_state(dev); } static int stm32_spi_runtime_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct stm32_spi *spi = spi_master_get_devdata(master); + int ret; + + ret = pinctrl_pm_select_default_state(dev); + if (ret) + return ret; return clk_prepare_enable(spi->clk); } @@ -2043,10 +2058,23 @@ static int stm32_spi_resume(struct device *dev) return ret; ret = spi_master_resume(master); - if (ret) + if (ret) { clk_disable_unprepare(spi->clk); + return ret; + } - return ret; + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "Unable to power device:%d\n", ret); + return ret; + } + + spi->cfg->config(spi); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return 0; } #endif diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 6626587e77b4b15f1b4c8792022eb60e10a628a4..0cab239d8e7fc00983084107c7a74f5f0367af74 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -475,6 +475,12 @@ static LIST_HEAD(spi_controller_list); */ static 
DEFINE_MUTEX(board_lock); +/* + * Prevents addition of devices with same chip select and + * addition of devices below an unregistering controller. + */ +static DEFINE_MUTEX(spi_add_lock); + /** * spi_alloc_device - Allocate a new SPI device * @ctlr: Controller to which device is connected @@ -554,7 +560,6 @@ static int spi_dev_check(struct device *dev, void *data) */ int spi_add_device(struct spi_device *spi) { - static DEFINE_MUTEX(spi_add_lock); struct spi_controller *ctlr = spi->controller; struct device *dev = ctlr->dev.parent; int status; @@ -582,6 +587,13 @@ int spi_add_device(struct spi_device *spi) goto done; } + /* Controller may unregister concurrently */ + if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && + !device_is_registered(&ctlr->dev)) { + status = -ENODEV; + goto done; + } + /* Descriptors take precedence */ if (ctlr->cs_gpiods) spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select]; @@ -1315,8 +1327,6 @@ static int spi_transfer_one_message(struct spi_controller *ctlr, if (msg->status && ctlr->handle_err) ctlr->handle_err(ctlr, msg); - spi_res_release(ctlr, msg); - spi_finalize_current_message(ctlr); return ret; @@ -1713,6 +1723,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr) spi_unmap_msg(ctlr, mesg); + /* In the prepare_messages callback the spi bus has the opportunity to + * split a transfer to smaller chunks. + * Release split transfers here since spi_map_msg is done on the + * split transfers. + */ + spi_res_release(ctlr, mesg); + if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { ret = ctlr->unprepare_message(ctlr, mesg); if (ret) { @@ -2795,6 +2812,10 @@ void spi_unregister_controller(struct spi_controller *ctlr) struct spi_controller *found; int id = ctlr->bus_num; + /* Prevent addition of new devices, unregister existing ones */ + if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) + mutex_lock(&spi_add_lock); + device_for_each_child(&ctlr->dev, NULL, __unregister); /* First make sure that this controller was ever added */ @@ -2815,6 +2836,9 @@ void spi_unregister_controller(struct spi_controller *ctlr) if (found == ctlr) idr_remove(&spi_master_idr, id); mutex_unlock(&board_lock); + + if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) + mutex_unlock(&spi_add_lock); } EXPORT_SYMBOL_GPL(spi_unregister_controller); diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c index 823dc99be46f15adf58389c5fd82e06f7627e7cf..a8d2525e7af9e8a098aafdc4a5caf69f43462992 100644 --- a/drivers/ssb/driver_chipcommon.c +++ b/drivers/ssb/driver_chipcommon.c @@ -425,7 +425,7 @@ void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc, *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2); break; } - /* Fall through */ + fallthrough; default: *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB); } diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c index 1ca2ac5ef2b80f2f2e447f370c26584cd4df256b..354486b7ed3a718c41e3ceaa3da56a3d7e38b50f 100644 --- a/drivers/ssb/driver_mipscore.c +++ b/drivers/ssb/driver_mipscore.c @@ -342,7 +342,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore) set_irq(dev, irq++); break; } - /* fallthrough */ + fallthrough; case SSB_DEV_EXTIF: set_irq(dev, 0); break; diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c index b97a5c32d44abc0a11cf58a1d1d95c281ff512e4..f49ab1aa2149ab28213eed3639804d117ce15ea5 100644 --- a/drivers/ssb/scan.c +++ b/drivers/ssb/scan.c @@ -228,7 +228,7 @@ static void __iomem *ssb_ioremap(struct ssb_bus *bus, switch (bus->bustype) { case SSB_BUSTYPE_SSB: /* Only map the first core for now. */ - /* fallthrough... 
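/*
 * The spi.c hunks above hoist spi_add_lock to file scope so that
 * spi_add_device() and spi_unregister_controller() serialize on the same
 * mutex, and an add racing with unregistration fails with -ENODEV instead
 * of attaching to a dying controller.  A POSIX-threads sketch of the
 * scheme; the controller structure here is invented:
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t add_lock = PTHREAD_MUTEX_INITIALIZER;

struct controller { bool registered; int ndevices; };

static int add_device(struct controller *ctlr)
{
	int ret = 0;

	pthread_mutex_lock(&add_lock);
	if (!ctlr->registered)
		ret = -ENODEV;		/* controller is going away */
	else
		ctlr->ndevices++;
	pthread_mutex_unlock(&add_lock);
	return ret;
}

static void unregister_controller(struct controller *ctlr)
{
	pthread_mutex_lock(&add_lock);	/* block new adds... */
	ctlr->registered = false;	/* ...while tearing children down */
	ctlr->ndevices = 0;
	pthread_mutex_unlock(&add_lock);
}

int main(void)
{
	struct controller c = { .registered = true };

	add_device(&c);			/* ok */
	unregister_controller(&c);
	return add_device(&c) == -ENODEV ? 0 : 1;
}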
*/ + fallthrough; case SSB_BUSTYPE_PCMCIA: mmio = ioremap(baseaddr, SSB_CORE_SIZE); break; diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c index 8b100a71f02ef09919fcb38acf561b006f98b14a..237531ba60f30ea7722bc088f801011a1a861f1f 100644 --- a/drivers/staging/greybus/audio_helper.c +++ b/drivers/staging/greybus/audio_helper.c @@ -173,8 +173,7 @@ static int gbaudio_remove_controls(struct snd_card *card, struct device *dev, id.index = control->index; kctl = snd_ctl_find_id(card, &id); if (!kctl) { - dev_err(dev, "%d: Failed to find %s\n", err, - control->name); + dev_err(dev, "Failed to find %s\n", control->name); continue; } err = snd_ctl_remove(card, kctl); diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c index 2f9fdbdcd547017b8704b7839f8cb33f658ab110..83b38ae8908c1be464df121c139349435fbaed07 100644 --- a/drivers/staging/greybus/audio_topology.c +++ b/drivers/staging/greybus/audio_topology.c @@ -456,6 +456,15 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, val = ucontrol->value.integer.value[0] & mask; connect = !!val; + ret = gb_pm_runtime_get_sync(bundle); + if (ret) + return ret; + + ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id, + GB_AUDIO_INVALID_INDEX, &gbvalue); + if (ret) + goto exit; + /* update ucontrol */ if (gbvalue.value.integer_value[0] != val) { for (wi = 0; wi < wlist->num_widgets; wi++) { @@ -466,25 +475,17 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, gbvalue.value.integer_value[0] = cpu_to_le32(ucontrol->value.integer.value[0]); - ret = gb_pm_runtime_get_sync(bundle); - if (ret) - return ret; - ret = gb_audio_gb_set_control(module->mgmt_connection, data->ctl_id, GB_AUDIO_INVALID_INDEX, &gbvalue); - - gb_pm_runtime_put_autosuspend(bundle); - - if (ret) { - dev_err_ratelimited(codec_dev, - "%d:Error in %s for %s\n", ret, - __func__, kcontrol->id.name); - return ret; - } } - return 0; +exit: + gb_pm_runtime_put_autosuspend(bundle); + if (ret) + dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret, + __func__, kcontrol->id.name); + return ret; } #define SOC_DAPM_MIXER_GB(xname, kcount, data) \ diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c index 8ea65bef35d24f4db4caa79c246e46f6df9d59ab..a4e4eef55f352a55b5c2363e6d472c0ea20ee3d8 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c @@ -4984,7 +4984,7 @@ enum mipi_port_id __get_mipi_port(struct atomisp_device *isp, if (MIPI_PORT1_ID + 1 != N_MIPI_PORT_ID) { return MIPI_PORT1_ID + 1; } - /* fall through */ + fallthrough; default: dev_err(isp->dev, "unsupported port: %d\n", port); return MIPI_PORT0_ID; diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c index cccc5bfa105732f117992341e471ed44870e7436..1b2b2c68025b4cc6643c40942925cc8948c69a71 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c +++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c @@ -704,14 +704,14 @@ static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd, return false; } - /* fall-through */ + fallthrough; case ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE: if (pipe_id == IA_CSS_PIPE_ID_CAPTURE || pipe_id == IA_CSS_PIPE_ID_PREVIEW) return true; return false; - /* fall-through */ + fallthrough; case ATOMISP_RUN_MODE_VIDEO: if (!asd->continuous_mode->val) { if 
(pipe_id == IA_CSS_PIPE_ID_VIDEO || @@ -720,7 +720,7 @@ static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd, else return false; } - /* fall through */ + fallthrough; case ATOMISP_RUN_MODE_SDV: if (pipe_id == IA_CSS_PIPE_ID_CAPTURE || pipe_id == IA_CSS_PIPE_ID_VIDEO) @@ -2765,7 +2765,7 @@ static unsigned int atomisp_get_pipe_index(struct atomisp_sub_device *asd, if (!atomisp_is_mbuscode_raw(asd->fmt[asd->capture_pad].fmt.code)) { return IA_CSS_PIPE_ID_CAPTURE; } - /* fall through */ + fallthrough; case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW: if (asd->yuvpp_mode) return IA_CSS_PIPE_ID_YUVPP; diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c index f8d616f08b510fe093b9fe6e47683c2bb4b63137..65b0c8a662a06f267b739cc1af89e64586c81495 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c @@ -1467,7 +1467,6 @@ enum ia_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device *asd) case ATOMISP_RUN_MODE_VIDEO: return IA_CSS_PIPE_ID_VIDEO; case ATOMISP_RUN_MODE_STILL_CAPTURE: - /* fall through */ default: return IA_CSS_PIPE_ID_CAPTURE; } diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c index a000a1e316f78d705f320cefd8f855b017ccd655..0114b040247b986abfc9babea458dd68a037db2f 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c @@ -1086,7 +1086,7 @@ static int atomisp_subdev_probe(struct atomisp_device *isp) case RAW_CAMERA: dev_dbg(isp->dev, "raw_index: %d\n", raw_index); raw_index = isp->input_cnt; - /* fall through */ + fallthrough; case SOC_CAMERA: dev_dbg(isp->dev, "SOC_INDEX: %d\n", isp->input_cnt); if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) { diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c index 4fb9bfdd2f4cee1506fce817d214b5a67c490143..f13af2329f4860a7bacdee9f5410ba6e26fbce5f 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c @@ -660,7 +660,7 @@ static void free_private_bo_pages(struct hmm_buffer_object *bo, break; } - /* fall through */ + fallthrough; /* * if dynamic memory pool doesn't exist, need to free diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c index 54434c2dbaf904d8037b8f709452ede91ee8e744..a68cbb4995f0f3e02b63a84cafb384c274b02aad 100644 --- a/drivers/staging/media/atomisp/pci/sh_css.c +++ b/drivers/staging/media/atomisp/pci/sh_css.c @@ -4510,7 +4510,7 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe, #endif pipe->stop_requested = false; } - /* fall through */ + fallthrough; case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME: case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME: frame = (struct ia_css_frame *)HOST_ADDRESS(ddr_buffer.kernel_ptr); diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c index 24041849384a4bd5189b57b856683e416d79b14f..6386a3989bfe9aaea9d243ee9b1777c908483eff 100644 --- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c +++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c @@ -110,7 +110,7 @@ hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx, case V4L2_MPEG2_PICTURE_CODING_TYPE_B: backward_addr = hantro_get_ref(ctx, slice_params->backward_ref_ts); - /* fall-through */ + fallthrough; case V4L2_MPEG2_PICTURE_CODING_TYPE_P: 
forward_addr = hantro_get_ref(ctx, slice_params->forward_ref_ts); diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c index 7e9aad67148963c5dc36f472bc7890005d5db808..f610fa5b43354bb9c7761ab90faa7273c0f8ffe2 100644 --- a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c +++ b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c @@ -112,7 +112,7 @@ rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu, case V4L2_MPEG2_PICTURE_CODING_TYPE_B: backward_addr = hantro_get_ref(ctx, slice_params->backward_ref_ts); - /* fall-through */ + fallthrough; case V4L2_MPEG2_PICTURE_CODING_TYPE_P: forward_addr = hantro_get_ref(ctx, slice_params->forward_ref_ts); diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c index d92fd804488e907d5a29ea55a85ec576b294187b..21ebf77696964f4da12004df69947d44a8cae381 100644 --- a/drivers/staging/media/imx/imx-media-csi.c +++ b/drivers/staging/media/imx/imx-media-csi.c @@ -488,7 +488,7 @@ static int csi_idmac_setup_channel(struct csi_priv *priv) passthrough_cycles = incc->cycles; break; } - /* fallthrough - non-passthrough RGB565 (CSI-2 bus) */ + fallthrough; /* non-passthrough RGB565 (CSI-2 bus) */ default: burst_size = (image.pix.width & 0xf) ? 8 : 16; passthrough_bits = 16; diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c index bc27f9430eeb14712169a16f9807ee6536a4d5b1..7c6b91f0e780a303a60f623d702d3baae12d497d 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus.c @@ -199,6 +199,7 @@ static int cedrus_request_validate(struct media_request *req) struct v4l2_ctrl *ctrl_test; unsigned int count; unsigned int i; + int ret = 0; list_for_each_entry(obj, &req->objects, list) { struct vb2_buffer *vb; @@ -243,12 +244,16 @@ static int cedrus_request_validate(struct media_request *req) if (!ctrl_test) { v4l2_info(&ctx->dev->v4l2_dev, "Missing required codec control\n"); - return -ENOENT; + ret = -ENOENT; + break; } } v4l2_ctrl_request_hdl_put(hdl); + if (ret) + return ret; + return vb2_request_validate(req); } diff --git a/drivers/staging/media/usbvision/usbvision-i2c.c b/drivers/staging/media/usbvision/usbvision-i2c.c index 6e4df3335b1befd0da948428d58aca0593f5fbcd..aa3ff67a3cb1d3e02977691c963a3586ccabb61d 100644 --- a/drivers/staging/media/usbvision/usbvision-i2c.c +++ b/drivers/staging/media/usbvision/usbvision-i2c.c @@ -303,13 +303,13 @@ usbvision_i2c_read_max4(struct usb_usbvision *usbvision, unsigned char addr, switch (len) { case 4: buf[3] = usbvision_read_reg(usbvision, USBVISION_SER_DAT4); - /* fall through */ + fallthrough; case 3: buf[2] = usbvision_read_reg(usbvision, USBVISION_SER_DAT3); - /* fall through */ + fallthrough; case 2: buf[1] = usbvision_read_reg(usbvision, USBVISION_SER_DAT2); - /* fall through */ + fallthrough; case 1: buf[0] = usbvision_read_reg(usbvision, USBVISION_SER_DAT1); break; diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index fa1bf8b069fda5898b485dde6210ef7ebbea0b2c..2720f7319a3d03bb4bbda3eca700595ddd79863d 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -524,13 +524,8 @@ static void hfa384x_usb_defer(struct work_struct *data) */ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb) { - memset(hw, 0, sizeof(*hw)); hw->usb = usb; - /* set up the endpoints */ - hw->endp_in = usb_rcvbulkpipe(usb, 1); - hw->endp_out = 
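/*
 * The cedrus_request_validate() hunk above stops returning from inside the
 * loop while a control-handler reference is held; it records the error,
 * breaks out, drops the reference exactly once, and only then returns.  A
 * generic sketch of "release before every exit" with an invented handle:
 */
#include <errno.h>
#include <stdio.h>

struct handle { int refs; };

static void get_handle(struct handle *h) { h->refs++; }
static void put_handle(struct handle *h) { h->refs--; }

static int validate(struct handle *h, const int *required, int n)
{
	int ret = 0;

	get_handle(h);
	for (int i = 0; i < n; i++) {
		if (required[i] < 0) {	/* stand-in for a missing control */
			ret = -ENOENT;
			break;		/* do not return with the ref held */
		}
	}
	put_handle(h);			/* single release point */
	return ret;
}

int main(void)
{
	struct handle h = { 0 };
	const int req[] = { 1, -1, 2 };

	validate(&h, req, 3);
	printf("refs=%d\n", h.refs);	/* prints 0: no leak on error */
	return 0;
}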
usb_sndbulkpipe(usb, 2); - /* Set up the waitq */ init_waitqueue_head(&hw->cmdq); diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index 456603fd26c0b9892d902927a8776c5cc9db8b03..4b08dc1da4f97bb88435d195a206f3f96cffe884 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -61,23 +61,14 @@ static int prism2sta_probe_usb(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *dev; - const struct usb_endpoint_descriptor *epd; - const struct usb_host_interface *iface_desc = interface->cur_altsetting; + struct usb_endpoint_descriptor *bulk_in, *bulk_out; + struct usb_host_interface *iface_desc = interface->cur_altsetting; struct wlandevice *wlandev = NULL; struct hfa384x *hw = NULL; int result = 0; - if (iface_desc->desc.bNumEndpoints != 2) { - result = -ENODEV; - goto failed; - } - - result = -EINVAL; - epd = &iface_desc->endpoint[1].desc; - if (!usb_endpoint_is_bulk_in(epd)) - goto failed; - epd = &iface_desc->endpoint[2].desc; - if (!usb_endpoint_is_bulk_out(epd)) + result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL); + if (result) goto failed; dev = interface_to_usbdev(interface); @@ -96,6 +87,8 @@ static int prism2sta_probe_usb(struct usb_interface *interface, } /* Initialize the hw data */ + hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress); + hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress); hfa384x_create(hw, dev); hw->wlandev = wlandev; diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c index 30ea37e1a3f5e11021967201bd3f197030259050..bd37f2afadeaff98968274cf3c017d85dff8232d 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -444,7 +444,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp, case CPL_RX_ISCSI_DDP: case CPL_FW4_ACK: lro_flush = false; - /* fall through */ + fallthrough; case CPL_ABORT_RPL_RSS: case CPL_PASS_ESTABLISH: case CPL_PEER_CLOSE: diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index c9689610e186d6af9247400fd6f25a7bcf2b35bc..7b56fe9f10628b2c122643e5bff9fbc1b93a323a 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1389,14 +1389,27 @@ static u32 iscsit_do_crypto_hash_sg( sg = cmd->first_data_sg; page_off = cmd->first_data_sg_off; + if (data_length && page_off) { + struct scatterlist first_sg; + u32 len = min_t(u32, data_length, sg->length - page_off); + + sg_init_table(&first_sg, 1); + sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off); + + ahash_request_set_crypt(hash, &first_sg, NULL, len); + crypto_ahash_update(hash); + + data_length -= len; + sg = sg_next(sg); + } + while (data_length) { - u32 cur_len = min_t(u32, data_length, (sg->length - page_off)); + u32 cur_len = min_t(u32, data_length, sg->length); ahash_request_set_crypt(hash, sg, NULL, cur_len); crypto_ahash_update(hash); data_length -= cur_len; - page_off = 0; /* iscsit_map_iovec has already checked for invalid sg pointers */ sg = sg_next(sg); } @@ -3740,7 +3753,7 @@ iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) case ISTATE_SEND_LOGOUTRSP: if (!iscsit_logout_post_handler(cmd, conn)) return -ECONNRESET; - /* fall through */ + fallthrough; case ISTATE_SEND_STATUS: case ISTATE_SEND_ASYNCMSG: case ISTATE_SEND_NOPIN: diff --git a/drivers/target/iscsi/iscsi_target_login.c 
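In the prism2usb hunk above, the hand-rolled bNumEndpoints and usb_endpoint_is_bulk_* checks against fixed endpoint indices are replaced by usb_find_common_endpoints(), which scans the altsetting and fails if a requested endpoint kind is missing. A sketch of the same probe shape; struct my_hw and my_fill_pipes() are illustrative names:

#include <linux/usb.h>

struct my_hw {
        unsigned int endp_in;
        unsigned int endp_out;
};

static int my_fill_pipes(struct usb_interface *intf, struct my_hw *hw)
{
        struct usb_endpoint_descriptor *bulk_in, *bulk_out;
        struct usb_device *udev = interface_to_usbdev(intf);
        int ret;

        ret = usb_find_common_endpoints(intf->cur_altsetting,
                                        &bulk_in, &bulk_out, NULL, NULL);
        if (ret)
                return ret;     /* no usable bulk pair on this interface */

        /* build pipes from discovered addresses, not hard-coded numbers */
        hw->endp_in = usb_rcvbulkpipe(udev, bulk_in->bEndpointAddress);
        hw->endp_out = usb_sndbulkpipe(udev, bulk_out->bEndpointAddress);
        return 0;
}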
b/drivers/target/iscsi/iscsi_target_login.c index 85748e3388582156dfb8b35a14c499b05edc9033..893d1b406c29254ca0bb33fa19b4bcadd0253192 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1149,7 +1149,7 @@ void iscsit_free_conn(struct iscsi_conn *conn) } void iscsi_target_login_sess_out(struct iscsi_conn *conn, - struct iscsi_np *np, bool zero_tsih, bool new_sess) + bool zero_tsih, bool new_sess) { if (!new_sess) goto old_sess_out; @@ -1167,7 +1167,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, conn->sess = NULL; old_sess_out: - iscsi_stop_login_thread_timer(np); /* * If login negotiation fails check if the Time2Retain timer * needs to be restarted. @@ -1407,8 +1406,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) new_sess_out: new_sess = true; old_sess_out: + iscsi_stop_login_thread_timer(np); tpg_np = conn->tpg_np; - iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess); + iscsi_target_login_sess_out(conn, zero_tsih, new_sess); new_sess = false; if (tpg) { diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 3b8e3639ff5d01c6dbeb31ffb1201bb2453a822c..fc95e6150253f6ed744bb9ae955960b8bb1a3230 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -22,8 +22,7 @@ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); extern void iscsit_free_conn(struct iscsi_conn *); extern int iscsit_start_kthreads(struct iscsi_conn *); extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); -extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, - bool, bool); +extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, bool); extern int iscsi_target_login_thread(void *); extern void iscsi_handle_login_thread_timeout(struct timer_list *t); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index f88a52fec88952c74515f4ad4beaedbec2252cd6..8b40f10976ff8a6d9de20a23071ec6d554d828b2 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -535,12 +535,11 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) { - struct iscsi_np *np = login->np; bool zero_tsih = login->zero_tsih; iscsi_remove_failed_auth_entry(conn); iscsi_target_nego_release(conn); - iscsi_target_login_sess_out(conn, np, zero_tsih, true); + iscsi_target_login_sess_out(conn, zero_tsih, true); } struct conn_timeout { diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 8fc88654bff63c0f90341b5f6e0578d358d5a3f6..5f79ea05f9b815ce9ced1f7a6c670325cebf6c18 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -345,7 +345,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type, break; case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: we = 1; - /* fall through */ + fallthrough; case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: /* * Some commands are only allowed for registered I_T Nexuses. @@ -354,7 +354,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type, break; case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: we = 1; - /* fall through */ + fallthrough; case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: /* * Each registered I_T Nexus is a reservation holder. 
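The iscsi_target_login_sess_out() change above drops the struct iscsi_np argument because its only use, stopping the login-thread timer, is hoisted into the single caller's error path. A schematic of that refactor with illustrative names; stop_login_timer() stands in for iscsi_stop_login_thread_timer():

#include <linux/types.h>

struct np;
struct conn;
void stop_login_timer(struct np *np);

static void sess_out(struct conn *c, bool zero_tsih, bool new_sess)
{
        /* session teardown only; no timer or np knowledge needed */
}

static void login_error_path(struct np *np, struct conn *c)
{
        stop_login_timer(np);   /* done once, for every error path */
        sess_out(c, true, true);
}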
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index f1e81886122d0ade6ba6438bc7e77df5bce1b8ae..6e8b8d30938f6f12509578406537147b541fccfb 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -734,7 +734,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, } if (!protect) return TCM_NO_SENSE; - /* Fallthrough */ + fallthrough; default: pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " "PROTECT: 0x%02x\n", cdb[0], protect); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 9fb0be0aa620438babaaf1e1664cc4ad7aa05b1d..590eac2df90904cae2f7aa51dac26308be60cfb4 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2236,7 +2236,7 @@ static void transport_complete_qf(struct se_cmd *cmd) ret = cmd->se_tfo->queue_data_in(cmd); break; } - /* fall through */ + fallthrough; case DMA_NONE: queue_status: trace_target_cmd_complete(cmd); @@ -2431,7 +2431,7 @@ static void target_complete_ok_work(struct work_struct *work) goto queue_full; break; } - /* fall through */ + fallthrough; case DMA_NONE: queue_status: trace_target_cmd_complete(cmd); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index e9f0dda5ff92b36991f6aef37274d23d3fd961f0..a7ed56602c6cd20dbd425d915419c6e7abae6ff8 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -537,7 +537,7 @@ static void ft_send_work(struct work_struct *work) case FCP_PTA_ACA: task_attr = TCM_ACA_TAG; break; - case FCP_PTA_SIMPLE: /* Fallthrough */ + case FCP_PTA_SIMPLE: default: task_attr = TCM_SIMPLE_TAG; } diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c index bf7bae42c141c51493c240e297c6b226f38b232a..6dc879fea9c8a8d4c09705bbb02cec0a54f339e1 100644 --- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c +++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved. */ #include @@ -191,7 +191,7 @@ static int qpnp_tm_get_temp(void *data, int *temp) chip->temp = mili_celsius; } - *temp = chip->temp < 0 ? 
0 : chip->temp; + *temp = chip->temp; return 0; } diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c index e64db5f80d909a8ad980c177986b5f069168b45b..4ffa2e2c0145777d0f1d65f0f4f0c9ad99486f56 100644 --- a/drivers/thermal/qcom/tsens-v0_1.c +++ b/drivers/thermal/qcom/tsens-v0_1.c @@ -220,7 +220,7 @@ static int calibrate_8916(struct tsens_priv *priv) p2[4] = (qfprom_cdata[1] & MSM8916_S4_P2_MASK) >> MSM8916_S4_P2_SHIFT; for (i = 0; i < priv->num_sensors; i++) p2[i] = ((base1 + p2[i]) << 3); - /* Fall through */ + fallthrough; case ONE_PT_CALIB2: base0 = (qfprom_cdata[0] & MSM8916_BASE0_MASK); p1[0] = (qfprom_cdata[0] & MSM8916_S0_P1_MASK) >> MSM8916_S0_P1_SHIFT; @@ -355,7 +355,7 @@ static int calibrate_8974(struct tsens_priv *priv) p2[8] = (calib[5] & S8_P2_BKP_MASK) >> S8_P2_BKP_SHIFT; p2[9] = (calib[5] & S9_P2_BKP_MASK) >> S9_P2_BKP_SHIFT; p2[10] = (calib[5] & S10_P2_BKP_MASK) >> S10_P2_BKP_SHIFT; - /* Fall through */ + fallthrough; case ONE_PT_CALIB: case ONE_PT_CALIB2: base1 = bkp[0] & BASE1_MASK; @@ -390,7 +390,7 @@ static int calibrate_8974(struct tsens_priv *priv) p2[8] = (calib[4] & S8_P2_MASK) >> S8_P2_SHIFT; p2[9] = (calib[4] & S9_P2_MASK) >> S9_P2_SHIFT; p2[10] = (calib[4] & S10_P2_MASK) >> S10_P2_SHIFT; - /* Fall through */ + fallthrough; case ONE_PT_CALIB: case ONE_PT_CALIB2: base1 = calib[0] & BASE1_MASK; @@ -420,7 +420,7 @@ static int calibrate_8974(struct tsens_priv *priv) p2[i] <<= 2; p2[i] |= BIT_APPEND; } - /* Fall through */ + fallthrough; case ONE_PT_CALIB2: for (i = 0; i < priv->num_sensors; i++) { p1[i] += base1; diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c index b682a4df00810c6f92085babad1b6f567849baac..3c19a3800c6d685425f35c46d0a7545bc78749ac 100644 --- a/drivers/thermal/qcom/tsens-v1.c +++ b/drivers/thermal/qcom/tsens-v1.c @@ -202,7 +202,7 @@ static int calibrate_v1(struct tsens_priv *priv) p2[9] = (qfprom_cdata[3] & S9_P2_MASK) >> S9_P2_SHIFT; for (i = 0; i < priv->num_sensors; i++) p2[i] = ((base1 + p2[i]) << 2); - /* Fall through */ + fallthrough; case ONE_PT_CALIB2: base0 = (qfprom_cdata[4] & BASE0_MASK) >> BASE0_SHIFT; p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT; @@ -263,7 +263,7 @@ static int calibrate_8976(struct tsens_priv *priv) for (i = 0; i < priv->num_sensors; i++) p2[i] = ((base1 + p2[i]) << 2); - /* Fall through */ + fallthrough; case ONE_PT_CALIB2: base0 = qfprom_cdata[0] & MSM8976_BASE0_MASK; p1[0] = (qfprom_cdata[0] & MSM8976_S0_P1_MASK) >> MSM8976_S0_P1_SHIFT; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 72bf159bceccd83c187bb114ad9eacb175dfa6a0..a6616e530a84d2d87bcb1dbc4d78026827017369 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1516,7 +1516,7 @@ EXPORT_SYMBOL_GPL(thermal_zone_device_register); */ void thermal_zone_device_unregister(struct thermal_zone_device *tz) { - int i; + int i, tz_id; const struct thermal_zone_params *tzp; struct thermal_cooling_device *cdev; struct thermal_zone_device *pos = NULL; @@ -1525,6 +1525,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) return; tzp = tz->tzp; + tz_id = tz->id; mutex_lock(&thermal_list_lock); list_for_each_entry(pos, &thermal_tz_list, node) @@ -1567,7 +1568,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) mutex_destroy(&tz->lock); device_unregister(&tz->device); - thermal_notify_tz_delete(tz->id); + thermal_notify_tz_delete(tz_id); } EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); diff --git 
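The thermal_zone_device_unregister() hunk above copies tz->id into a local before device_unregister() can drop the last reference, so the delete notification never dereferences a freed zone. A minimal sketch of that save-before-release ordering; struct my_zone and notify_zone_deleted() are hypothetical:

#include <linux/device.h>

struct my_zone {
        int id;
        struct device dev;
};

void notify_zone_deleted(int zone_id);

static void my_unregister(struct my_zone *tz)
{
        int tz_id = tz->id;             /* copy while tz is still valid */

        device_unregister(&tz->dev);    /* may free tz */
        notify_zone_deleted(tz_id);     /* must not touch *tz here */
}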
a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c index 63b02bfb2adf65ccdc913621c7df3863ae185326..fdb8a495ab69a8b0c2ad35f8ee56e0bc9c947ccf 100644 --- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c +++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c @@ -37,20 +37,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = { /* * Temperature values in milli degree celsius - * ADC code values from 530 to 923 + * ADC code values from 13 to 107, see TRM + * "18.4.10.2.3 ADC Codes Versus Temperature". */ static const int omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = { - -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000, - -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000, - -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000, - 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000, - 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000, - 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000, - 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000, - 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000, - 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000, - 117000, 118000, 120000, 122000, 123000, + -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, + -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000, + -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000, + 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500, + 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000, + 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000, + 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000, + 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000, + 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000, + 115000, 117000, 118500, 120000, 122000, 123500, 125000, }; /* OMAP4430 data */ diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h index a453ff8eb313e185895208ac4855639a388ef011..9a3955c3853ba112dbf12bf1d006e80b2f3718e6 100644 --- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h +++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h @@ -53,9 +53,13 @@ * and thresholds for OMAP4430. */ -/* ADC conversion table limits */ -#define OMAP4430_ADC_START_VALUE 0 -#define OMAP4430_ADC_END_VALUE 127 +/* + * ADC conversion table limits. Ignore values outside the TRM listed + * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter + * "18.4.10.2.3 ADC Codes Versus Temperature". 
+ */ +#define OMAP4430_ADC_START_VALUE 13 +#define OMAP4430_ADC_END_VALUE 107 /* bandgap clock limits (no control on 4430) */ #define OMAP4430_MAX_FREQ 32768 #define OMAP4430_MIN_FREQ 32768 diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index f77ceae5c7d789d48aecaa9c242408a161c191bf..394a23ce6ca4765b3571d678b4574813acbca2d1 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c @@ -453,7 +453,7 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame, "RX: checksum mismatch, dropping packet\n"); goto rx; } - /* Fall through */ + fallthrough; case TB_CFG_PKG_ICM_EVENT: if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size)) goto rx; diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 712395f518b82225e06f58699f23c2fca470cb4d..a921de9ce7cbe2277244215413dfaff377123852 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -684,6 +684,7 @@ static int tb_init_port(struct tb_port *port) if (res == -ENODEV) { tb_dbg(port->sw->tb, " Port %d: not implemented\n", port->port); + port->disabled = true; return 0; } return res; @@ -2092,7 +2093,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) if (tb_route(sw)) return 0; - /* fallthrough */ + fallthrough; case 3: ret = tb_switch_set_uuid(sw); if (ret) diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index a413d55b5f8b3d40aff34f0db6fab9dd98a63c9e..3c620a9203c5a434b561fe253ef32938b578af90 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -186,7 +186,7 @@ struct tb_switch { * @cap_adap: Offset of the adapter specific capability (%0 if not present) * @cap_usb4: Offset to the USB4 port capability (%0 if not present) * @port: Port number on switch - * @disabled: Disabled by eeprom + * @disabled: Disabled by eeprom or enabled but not implemented * @bonded: true if the port is bonded (two lanes combined as one) * @dual_link_port: If the switch is connected using two ports, points * to the other port. diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 2aae2c76d880dd49074b2e040627523691f3df94..829b6ccdd5d4f335120934a525181dd277fdd17d 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -315,7 +315,7 @@ static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate) switch (rate) { default: WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate); - /* Fallthrough */ + fallthrough; case 1620: val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT; break; @@ -355,7 +355,7 @@ static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes) default: WARN(1, "invalid number of lanes %u passed, defaulting to 1\n", lanes); - /* Fallthrough */ + fallthrough; case 1: val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT; break; @@ -951,10 +951,18 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel, int ret, max_rate, allocate_up, allocate_down; ret = usb4_usb3_port_actual_link_rate(tunnel->src_port); - if (ret <= 0) { - tb_tunnel_warn(tunnel, "tunnel is not up\n"); + if (ret < 0) { + tb_tunnel_warn(tunnel, "failed to read actual link rate\n"); return; + } else if (!ret) { + /* Use maximum link rate if the link valid is not set */ + ret = usb4_usb3_port_max_link_rate(tunnel->src_port); + if (ret < 0) { + tb_tunnel_warn(tunnel, "failed to read maximum link rate\n"); + return; + } } + /* * 90% of the max rate can be allocated for isochronous * transfers. 
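The tb_usb3_reclaim_available_bandwidth() hunk above distinguishes a real read failure (negative return) from a link whose valid bit simply is not set yet (zero), falling back to the port's maximum rate in the latter case. A sketch of that fallback, repeating the two usb4 helper prototypes from the hunk so it stands alone:

struct tb_port;
int usb4_usb3_port_actual_link_rate(struct tb_port *port);
int usb4_usb3_port_max_link_rate(struct tb_port *port);

static int usable_link_rate(struct tb_port *port)
{
        int rate = usb4_usb3_port_actual_link_rate(port);

        if (rate < 0)
                return rate;    /* genuine failure reading the register */
        if (!rate)              /* link valid not set: fall back, don't warn */
                rate = usb4_usb3_port_max_link_rate(port);
        return rate;            /* caller allocates up to 90% of this */
}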
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 2a0e51a20e342911748ba524d19c03fc99af59fd..92c9a476defc998f5cf8dfd6185127cf68b479b5 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -492,7 +492,7 @@ static void xencons_backend_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* fall through - Missed the backend's CLOSING state. */ + fallthrough; /* Missed the backend's CLOSING state */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c index 21e76a2ec1820ccaee4d227239823e35f849a992..a8e19b4833bf9a95a0f95412d2241bdbb071f2f5 100644 --- a/drivers/tty/mips_ejtag_fdc.c +++ b/drivers/tty/mips_ejtag_fdc.c @@ -243,7 +243,7 @@ static struct fdc_word mips_ejtag_fdc_encode(const char **ptrs, /* Fall back to a 3 byte encoding */ word.bytes = 3; word.word &= 0x00ffffff; - /* Fall through */ + fallthrough; case 3: /* 3 byte encoding */ word.word |= 0x82000000; diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 0a29a94ec438e74f2b4e79c255f4507da90f2afd..35cf12147e39c1b87a820738d347e12d53945ce7 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -1584,7 +1584,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) gsm_process_modem(tty, dlci, modem, clen); tty_kref_put(tty); } - /* Fall through */ + fallthrough; case 1: /* Line state will go via DLCI 0 controls only */ default: tty_insert_flip_string(port, data, len); @@ -1986,7 +1986,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c) gsm->address = 0; gsm->state = GSM_ADDRESS; gsm->fcs = INIT_FCS; - /* Fall through */ + fallthrough; case GSM_ADDRESS: /* Address continuation */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->address, c)) diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index b09eac4b6d64f0ae01076a6f6d39164dc435a14b..8e975cb29833adfef8cca81622c48868aa3db756 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -602,7 +602,7 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, case TCOFLUSH: flush_tx_queue(tty); } - /* fall through - to default */ + fallthrough; /* to default */ default: error = n_tty_ioctl_helper(tty, file, cmd, arg); diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c index f75696f0ee2d03c83de6ee7af0ae93bd93440c4a..934dd2fb2ec8072d50fef3324ee12852a063f869 100644 --- a/drivers/tty/n_r3964.c +++ b/drivers/tty/n_r3964.c @@ -605,7 +605,6 @@ static void receive_char(struct r3964_info *pInfo, const unsigned char c) } break; case R3964_WAIT_FOR_RX_REPEAT: - /* FALLTHROUGH */ case R3964_IDLE: if (c == STX) { /* Prevent rx_queue from overflow: */ diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c index db88dee3a3999a34fcdff1ccc193e315e1dc219c..f8e99995eee911de69ceb5eb8d95dec876cb1acd 100644 --- a/drivers/tty/serial/8250/8250_em.c +++ b/drivers/tty/serial/8250/8250_em.c @@ -39,7 +39,7 @@ static void serial8250_em_serial_out(struct uart_port *p, int offset, int value) break; case UART_IER: /* IER @ 0x04 */ value &= 0x0f; /* only 4 valid bits - not Xscale */ - /* fall-through */ + fallthrough; case UART_DLL_EM: /* DLL @ 0x24 (+9) */ case UART_DLM_EM: /* DLM @ 0x28 (+9) */ writel(value, p->membase + (offset << 2)); diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index 04b9af7ed94153dac7e045bce7d8e11132f3e057..2d0e7c7e408dc9722181d51ee6415daa3e2e7d52 100644 --- 
a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -744,6 +744,24 @@ static const struct exar8250_board pbn_exar_XR17V35x = { .exit = pci_xr17v35x_exit, }; +static const struct exar8250_board pbn_fastcom35x_2 = { + .num_ports = 2, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_4 = { + .num_ports = 4, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_8 = { + .num_ports = 8, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + static const struct exar8250_board pbn_exar_XR17V4358 = { .num_ports = 12, .setup = pci_xr17v35x_setup, @@ -811,9 +829,9 @@ static const struct pci_device_id exar_pci_tbl[] = { EXAR_DEVICE(EXAR, XR17V358, pbn_exar_XR17V35x), EXAR_DEVICE(EXAR, XR17V4358, pbn_exar_XR17V4358), EXAR_DEVICE(EXAR, XR17V8358, pbn_exar_XR17V8358), - EXAR_DEVICE(COMMTECH, 4222PCIE, pbn_exar_XR17V35x), - EXAR_DEVICE(COMMTECH, 4224PCIE, pbn_exar_XR17V35x), - EXAR_DEVICE(COMMTECH, 4228PCIE, pbn_exar_XR17V35x), + EXAR_DEVICE(COMMTECH, 4222PCIE, pbn_fastcom35x_2), + EXAR_DEVICE(COMMTECH, 4224PCIE, pbn_fastcom35x_4), + EXAR_DEVICE(COMMTECH, 4228PCIE, pbn_fastcom35x_8), EXAR_DEVICE(COMMTECH, 4222PCI335, pbn_fastcom335_2), EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4), diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c index d1d253c4b518bc724ebe7fa3498a057604b3440d..31c9e83ea3cb2c5bcf62aea19e8957d7fff284a4 100644 --- a/drivers/tty/serial/8250/8250_fintek.c +++ b/drivers/tty/serial/8250/8250_fintek.c @@ -255,7 +255,7 @@ static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level) case CHIP_ID_F81866: sio_write_mask_reg(pdata, F81866_FIFO_CTRL, F81866_IRQ_MODE1, 0); - /* fall through */ + fallthrough; case CHIP_ID_F81865: sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_SHARE, F81866_IRQ_SHARE); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 1a74d511b02a580bbbdf033e7d9269e78c602def..3eb2d485eaeb0d6e36d2d3dc766bdb0323451a28 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -631,7 +631,7 @@ pci_timedia_setup(struct serial_private *priv, break; case 3: offset = board->uart_offset; - /* FALLTHROUGH */ + fallthrough; case 4: /* BAR 2 */ case 5: /* BAR 3 */ case 6: /* BAR 4 */ diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 09475695effd472dd027922b5642d189a23921ec..c71d647eb87a0214137b136d8e01c7fb9fddb197 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1872,7 +1872,7 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir) switch (iir & 0x3f) { case UART_IIR_RX_TIMEOUT: serial8250_rx_dma_flush(up); - /* fall-through */ + fallthrough; case UART_IIR_RLSI: return true; } @@ -2275,6 +2275,10 @@ int serial8250_do_startup(struct uart_port *port) if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { unsigned char iir1; + + if (port->irqflags & IRQF_SHARED) + disable_irq_nosync(port->irq); + /* * Test for UARTs that do not reassert THRE when the * transmitter is idle and the interrupt has already @@ -2284,8 +2288,6 @@ int serial8250_do_startup(struct uart_port *port) * allow register changes to become visible. 
*/ spin_lock_irqsave(&port->lock, flags); - if (up->port.irqflags & IRQF_SHARED) - disable_irq_nosync(port->irq); wait_for_xmitr(up, UART_LSR_THRE); serial_port_out_sync(port, UART_IER, UART_IER_THRI); @@ -2297,9 +2299,10 @@ int serial8250_do_startup(struct uart_port *port) iir = serial_port_in(port, UART_IIR); serial_port_out(port, UART_IER, 0); + spin_unlock_irqrestore(&port->lock, flags); + if (port->irqflags & IRQF_SHARED) enable_irq(port->irq); - spin_unlock_irqrestore(&port->lock, flags); /* * If the interrupt is not reasserted, or we otherwise diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c index e0b73a5402db71048565aa809aca25ef8559e381..a2978abab0db6032b1cae214c5ba0da575bd0542 100644 --- a/drivers/tty/serial/8250/8250_uniphier.c +++ b/drivers/tty/serial/8250/8250_uniphier.c @@ -75,7 +75,7 @@ static unsigned int uniphier_serial_in(struct uart_port *p, int offset) break; case UART_LCR: valshift = 8; - /* fall through */ + fallthrough; case UART_MCR: offset = UNIPHIER_UART_LCR_MCR; break; @@ -101,7 +101,7 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value) case UART_SCR: /* No SCR for this hardware. Use CHAR as a scratch register */ valshift = 8; - /* fall through */ + fallthrough; case UART_FCR: offset = UNIPHIER_UART_CHAR_FCR; break; @@ -109,7 +109,7 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value) valshift = 8; /* Divisor latch access bit does not exist. */ value &= ~UART_LCR_DLAB; - /* fall through */ + fallthrough; case UART_MCR: offset = UNIPHIER_UART_LCR_MCR; break; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 8a0352eb337c84ea5c9dd318edd7726b3946f583..9409be982aa64c1adc1359ec87c31c09c42efe63 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -517,6 +517,7 @@ config SERIAL_IMX_CONSOLE config SERIAL_IMX_EARLYCON bool "Earlycon on IMX serial port" + depends on ARCH_MXC || COMPILE_TEST depends on OF select SERIAL_EARLYCON help diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index d056ee6cca33edeb7723dce2cb7957b8deff8104..caf167f0c10a6238417184606973ea7ffc71be5b 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_SERIAL_ZS) += zs.o obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o obj-$(CONFIG_SERIAL_CPM) += cpm_uart/ obj-$(CONFIG_SERIAL_IMX) += imx.o +obj-$(CONFIG_SERIAL_IMX_EARLYCON) += imx_earlycon.o obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o obj-$(CONFIG_SERIAL_ICOM) += icom.o obj-$(CONFIG_SERIAL_MESON) += meson_uart.o diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index c010f639298dd17a451e5606c69b525417e08529..67498594d7d7e61a56ba4a28275327f652aec3ce 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2241,9 +2241,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) clk_disable(uap->clk); } -static void __init -pl011_console_get_options(struct uart_amba_port *uap, int *baud, - int *parity, int *bits) +static void pl011_console_get_options(struct uart_amba_port *uap, int *baud, + int *parity, int *bits) { if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) { unsigned int lcr_h, ibrd, fbrd; @@ -2276,7 +2275,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud, } } -static int __init pl011_console_setup(struct console *co, char *options) +static int pl011_console_setup(struct console *co, char *options) { struct uart_amba_port *uap; int baud = 
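The serial8250_do_startup() hunks above move disable_irq_nosync() in front of the spin_lock_irqsave() and enable_irq() after the unlock, so the THRE probe never takes the IRQ-descriptor lock while holding port->lock. The resulting shape, with the register pokes elided:

#include <linux/interrupt.h>
#include <linux/serial_core.h>

static void thre_probe(struct uart_port *port)
{
        unsigned long flags;

        if (port->irqflags & IRQF_SHARED)
                disable_irq_nosync(port->irq);  /* outside the port lock */

        spin_lock_irqsave(&port->lock, flags);
        /* ... write UART_IER, read back UART_IIR to test THRE ... */
        spin_unlock_irqrestore(&port->lock, flags);

        if (port->irqflags & IRQF_SHARED)
                enable_irq(port->irq);          /* after the lock is dropped */
}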
38400; @@ -2344,8 +2343,8 @@ static int __init pl011_console_setup(struct console *co, char *options) * * Returns 0 if console matches; otherwise non-zero to use default matching */ -static int __init pl011_console_match(struct console *co, char *name, int idx, - char *options) +static int pl011_console_match(struct console *co, char *name, int idx, + char *options) { unsigned char iotype; resource_size_t addr; @@ -2615,7 +2614,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, static int pl011_register_port(struct uart_amba_port *uap) { - int ret; + int ret, i; /* Ensure interrupts from this UART are masked and cleared */ pl011_write(0, uap, REG_IMSC); @@ -2626,6 +2625,9 @@ static int pl011_register_port(struct uart_amba_port *uap) if (ret < 0) { dev_err(uap->port.dev, "Failed to register AMBA-PL011 driver\n"); + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) + if (amba_ports[i] == uap) + amba_ports[i] = NULL; return ret; } } diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index e43471b33710b3885dd2b1ec74d68cdec70a3901..bb5fc8bdd57a6f06e118517a68ab2e078685c92a 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1845,7 +1845,7 @@ static void atmel_get_ip_name(struct uart_port *port) version = atmel_uart_readl(port, ATMEL_US_VERSION); switch (version) { case 0x814: /* sama5d2 */ - /* fall through */ + fallthrough; case 0x701: /* sama5d4 */ atmel_port->fidi_min = 3; atmel_port->fidi_max = 65535; diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 8573fc9cb0cdf58abe4e3dc59e6c8a72845d375d..76b94d0ff58659472f286a2ed526c96e9d1ab8a6 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -587,7 +587,6 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id) transmit_chars(up, lsr); break; case UART_IIR_RX_TIMEOUT: - /* FALLTHROUGH */ case UART_IIR_RDI: serial_omap_rdi(up, lsr); break; @@ -598,7 +597,6 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id) /* simply try again */ break; case UART_IIR_XOFF: - /* FALLTHROUGH */ default: break; } diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 3aa29d201f54d38e1d24f63b883679d4a50ffa9a..184b458820a313cb0412b8ed13e0981976b3d8c1 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -361,11 +361,16 @@ static int qcom_geni_serial_get_char(struct uart_port *uport) return NO_POLL_CHAR; if (word_cnt == 1 && (status & RX_LAST)) + /* + * NOTE: If RX_LAST_BYTE_VALID is 0 it needs to be + * treated as if it was BYTES_PER_FIFO_WORD. 
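The pl011_register_port() hunk above releases the amba_ports[] slots claimed for this port when uart_register_driver() fails, so a later probe does not trip over a stale pointer. The claim-then-rollback shape in isolation (table size and names are illustrative):

#include <linux/kernel.h>

struct uart_amba_port;
static struct uart_amba_port *amba_ports[14];

static void release_slots(struct uart_amba_port *uap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
                if (amba_ports[i] == uap)
                        amba_ports[i] = NULL;   /* undo the earlier claim */
}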
+ */ private_data->poll_cached_bytes_cnt = (status & RX_LAST_BYTE_VALID_MSK) >> RX_LAST_BYTE_VALID_SHFT; - else - private_data->poll_cached_bytes_cnt = 4; + + if (private_data->poll_cached_bytes_cnt == 0) + private_data->poll_cached_bytes_cnt = BYTES_PER_FIFO_WORD; private_data->poll_cached_bytes = readl(uport->membase + SE_GENI_RX_FIFOn); @@ -1098,7 +1103,7 @@ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport) } #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE -static int __init qcom_geni_console_setup(struct console *co, char *options) +static int qcom_geni_console_setup(struct console *co, char *options) { struct uart_port *uport; struct qcom_geni_serial_port *port; diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c index b5ef86ae2746e4b3b3810e26345cf8f4e1b264c5..85366e05925850dbd8d2c603893a0433a19ccf29 100644 --- a/drivers/tty/serial/rda-uart.c +++ b/drivers/tty/serial/rda-uart.c @@ -259,7 +259,7 @@ static void rda_uart_set_termios(struct uart_port *port, case CS5: case CS6: dev_warn(port->dev, "bit size not supported, using 7 bits\n"); - /* Fall through */ + fallthrough; case CS7: ctrl &= ~RDA_UART_DBITS_8; break; diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c index 8ed3482d2e1ebd44616e7e3dc060c6fd3734b7bc..8ae3e03fbd8ce625742782950af00eebbf698c64 100644 --- a/drivers/tty/serial/samsung_tty.c +++ b/drivers/tty/serial/samsung_tty.c @@ -1905,9 +1905,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, ourport->tx_irq = ret + 1; } - ret = platform_get_irq(platdev, 1); - if (ret > 0) - ourport->tx_irq = ret; + if (!s3c24xx_serial_has_interrupt_mask(port)) { + ret = platform_get_irq(platdev, 1); + if (ret > 0) + ourport->tx_irq = ret; + } /* * DMA is currently supported only on DT platforms, if DMA properties * are specified. diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index b87914ae6da8f27de4f430affd7225e3776b6126..bd13014a1c53758fd66c3ab985b3825029ec21ce 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -876,7 +876,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data) tegra_uart_write(tup, ier, UART_IER); break; } - /* Fall through */ + fallthrough; case 2: /* Receive */ if (!tup->use_rx_pio) { is_rx_start = tup->rx_in_progress; diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 3403dd79051737b50a6f5849eddb36bdee63241d..f797c971cd82feb0b26eea5becf86ce2b77b6a3d 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2101,7 +2101,7 @@ uart_set_options(struct uart_port *port, struct console *co, switch (parity) { case 'o': case 'O': termios.c_cflag |= PARODD; - /*fall through*/ + fallthrough; case 'e': case 'E': termios.c_cflag |= PARENB; break; diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 143300a8009098f946740357a2a51c9851eba054..ba503dd04ce2dc640c6618bf19c12b8fe3cf0873 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -970,7 +970,7 @@ static int stm32_init_port(struct stm32_port *stm32port, return ret; if (stm32port->info->cfg.has_wakeup) { - stm32port->wakeirq = platform_get_irq(pdev, 1); + stm32port->wakeirq = platform_get_irq_optional(pdev, 1); if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO) return stm32port->wakeirq ? 
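The qcom_geni_serial_get_char() hunk above fixes the polled byte count: on the last FIFO word, a zero RX_LAST_BYTE_VALID field means the whole word is valid, not that it is empty. The corrected decode, assuming the driver's own register macros:

static unsigned int rx_word_bytes(u32 status, u32 word_cnt)
{
        unsigned int cnt = 0;

        if (word_cnt == 1 && (status & RX_LAST))
                cnt = (status & RX_LAST_BYTE_VALID_MSK) >>
                      RX_LAST_BYTE_VALID_SHFT;
        if (!cnt)                       /* 0 encodes a full word */
                cnt = BYTES_PER_FIFO_WORD;
        return cnt;
}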
: -ENODEV; } diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index 8ce9a7a256e56b7860d57be20087512f6c0854e4..319e5ceb6130e2bee8a1ccc60e6a590c83a68ef9 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -514,7 +514,7 @@ static void receive_kbd_ms_chars(struct uart_sunsu_port *up, int is_break) switch (ret) { case 2: sunsu_change_mouse_baud(up); - /* fallthru */ + fallthrough; case 1: break; diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c index 7ea06bbc61973e501c449e517f561f0865819b39..001e19d7c17db51017068f2d831e0d2cd29713e8 100644 --- a/drivers/tty/serial/sunzilog.c +++ b/drivers/tty/serial/sunzilog.c @@ -306,7 +306,7 @@ static void sunzilog_kbdms_receive_chars(struct uart_sunzilog_port *up, switch (ret) { case 2: sunzilog_change_mouse_baud(up); - /* fallthru */ + fallthrough; case 1: break; diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 2833f1418d6d9570ac9ce4f1e3449055e09ddd93..a9b1ee27183a7105537c90140f9e7ce5969246c7 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -544,7 +544,7 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb, cdns_uart->baud = cdns_uart_set_baud_rate(cdns_uart->port, cdns_uart->baud); - /* fall through */ + fallthrough; case ABORT_RATE_CHANGE: if (!locked) spin_lock_irqsave(&cdns_uart->port->lock, flags); diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 9245fffdbceb170c1e686c093194f48d7f50ac49..e18f318586ab4c362c25e6f46a6499921a40169b 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -866,7 +866,7 @@ static int __tty_perform_flush(struct tty_struct *tty, unsigned long arg) ld->ops->flush_buffer(tty); tty_unthrottle(tty); } - /* fall through */ + fallthrough; case TCOFLUSH: tty_driver_flush_buffer(tty); break; diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index ccb533fd00a29aad764a2634ce4ba8fc2f005efd..19cd4a4b193999519e445d90e1c586c6c3fb39fe 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1201,7 +1201,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, unsigned int old_rows, old_row_size, first_copied_row; unsigned int new_cols, new_rows, new_row_size, new_screen_size; unsigned int user; - unsigned short *newscreen; + unsigned short *oldscreen, *newscreen; struct uni_screen *new_uniscr = NULL; WARN_CONSOLE_UNLOCKED(); @@ -1299,10 +1299,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_scr_end > new_origin) scr_memsetw((void *)new_origin, vc->vc_video_erase_char, new_scr_end - new_origin); - kfree(vc->vc_screenbuf); + oldscreen = vc->vc_screenbuf; vc->vc_screenbuf = newscreen; vc->vc_screenbuf_size = new_screen_size; set_origin(vc); + kfree(oldscreen); /* do part of a reset_terminal() */ vc->vc_top = 0; @@ -1553,7 +1554,7 @@ static void csi_J(struct vc_data *vc, int vpar) break; case 3: /* include scrollback */ flush_scrollback(vc); - /* fallthrough */ + fallthrough; case 2: /* erase whole display */ vc_uniscr_clear_lines(vc, 0, vc->vc_rows); count = vc->vc_cols * vc->vc_rows; @@ -2167,7 +2168,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) lf(vc); if (!is_kbd(vc, lnm)) return; - /* fall through */ + fallthrough; case 13: cr(vc); return; @@ -2306,7 +2307,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; } vc->vc_priv = EPecma; - /* fall through */ + fallthrough; case ESgetpars: if (c == ';' && vc->vc_npar < NPAR - 
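The vc_do_resize() hunk above defers kfree() of the old screen buffer until set_origin() has switched the console over to the new one, closing a window where the freed buffer was still the live origin. The swap-then-free pattern in isolation; switch_users() stands in for set_origin():

#include <linux/slab.h>

static void replace_buffer(unsigned short **slot, unsigned short *newbuf,
                           void (*switch_users)(void))
{
        unsigned short *oldbuf = *slot;

        *slot = newbuf;
        switch_users();         /* after this, nothing references oldbuf */
        kfree(oldbuf);          /* only now is the old buffer safe to free */
}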
1) { vc->vc_npar++; diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 91c3017750476d73ec9b193b9b8a7fe1995624fe..a4e520bdd521df3994284450253f622e8bc3c7d4 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -806,12 +806,22 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs) console_lock(); vcp = vc_cons[i].d; if (vcp) { + int ret; + int save_scan_lines = vcp->vc_scan_lines; + int save_font_height = vcp->vc_font.height; + if (v.v_vlin) vcp->vc_scan_lines = v.v_vlin; if (v.v_clin) vcp->vc_font.height = v.v_clin; vcp->vc_resize_user = 1; - vc_resize(vcp, v.v_cols, v.v_rows); + ret = vc_resize(vcp, v.v_cols, v.v_rows); + if (ret) { + vcp->vc_scan_lines = save_scan_lines; + vcp->vc_font.height = save_font_height; + console_unlock(); + return ret; + } } console_unlock(); } diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c index f7f6229082cad4a86d703e5beb545040bdeac256..60f4711717d22e07446aeb2d635327cc5a709a84 100644 --- a/drivers/usb/c67x00/c67x00-sched.c +++ b/drivers/usb/c67x00/c67x00-sched.c @@ -710,7 +710,8 @@ static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb) if (ret) return ret; break; - } /* else fallthrough */ + } + fallthrough; case STATUS_STAGE: pid = !usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN; ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1, diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 991786876dbb3cb598c44c1ccb0c250d55c16e08..7f6f3ab5b8a6770060cae81445aecd9e88d1cc48 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -378,21 +378,19 @@ static void acm_ctrl_irq(struct urb *urb) if (current_size < expected_size) { /* notification is transmitted fragmented, reassemble */ if (acm->nb_size < expected_size) { - if (acm->nb_size) { - kfree(acm->notification_buffer); - acm->nb_size = 0; - } + u8 *new_buffer; alloc_size = roundup_pow_of_two(expected_size); - /* - * kmalloc ensures a valid notification_buffer after a - * use of kfree in case the previous allocation was too - * small. Final freeing is done on disconnect. - */ - acm->notification_buffer = - kmalloc(alloc_size, GFP_ATOMIC); - if (!acm->notification_buffer) + /* Final freeing is done on disconnect. */ + new_buffer = krealloc(acm->notification_buffer, + alloc_size, GFP_ATOMIC); + if (!new_buffer) { + acm->nb_index = 0; goto exit; + } + + acm->notification_buffer = new_buffer; acm->nb_size = alloc_size; + dr = (struct usb_cdc_notification *)acm->notification_buffer; } copy_size = min(current_size, diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index f81606c6a35b03b92b7d0da2976c2e7a3800dfa7..7e73e989645bd2d517693707f8cf69d138a282a4 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -905,6 +905,35 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } +static bool is_dev_usb_generic_driver(struct device *dev) +{ + struct usb_device_driver *udd = dev->driver ? 
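The acm_ctrl_irq() hunk above grows the notification-reassembly buffer with krealloc(), which preserves the fragment bytes already copied in, where the old kfree()+kmalloc() pair silently discarded them; on allocation failure the old buffer stays valid and only the partial notification is dropped. The grow-in-place helper shape (names are illustrative):

#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/slab.h>

static int grow_buffer(u8 **buf, size_t *size, size_t needed, gfp_t gfp)
{
        size_t alloc;
        u8 *nb;

        if (*size >= needed)
                return 0;

        alloc = roundup_pow_of_two(needed);
        nb = krealloc(*buf, alloc, gfp);        /* keeps existing contents */
        if (!nb)
                return -ENOMEM;                 /* old buffer still usable */

        *buf = nb;
        *size = alloc;
        return 0;
}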
+ to_usb_device_driver(dev->driver) : NULL; + + return udd == &usb_generic_driver; +} + +static int __usb_bus_reprobe_drivers(struct device *dev, void *data) +{ + struct usb_device_driver *new_udriver = data; + struct usb_device *udev; + int ret; + + if (!is_dev_usb_generic_driver(dev)) + return 0; + + udev = to_usb_device(dev); + if (usb_device_match_id(udev, new_udriver->id_table) == NULL && + (!new_udriver->match || new_udriver->match(udev) != 0)) + return 0; + + ret = device_reprobe(dev); + if (ret && ret != -EPROBE_DEFER) + dev_err(dev, "Failed to reprobe device (error %d)\n", ret); + + return 0; +} + /** * usb_register_device_driver - register a USB device (not interface) driver * @new_udriver: USB operations for the device driver @@ -934,13 +963,20 @@ int usb_register_device_driver(struct usb_device_driver *new_udriver, retval = driver_register(&new_udriver->drvwrap.driver); - if (!retval) + if (!retval) { pr_info("%s: registered new device driver %s\n", usbcore_name, new_udriver->name); - else + /* + * Check whether any device could be better served with + * this new driver + */ + bus_for_each_dev(&usb_bus_type, NULL, new_udriver, + __usb_bus_reprobe_drivers); + } else { printk(KERN_ERR "%s: error %d registering device " " driver %s\n", usbcore_name, retval, new_udriver->name); + } return retval; } diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c index b6f2d4b447546ed5a44366bfbc7e375f09ba761c..2b2f1ab6e36aafca243d3203666d6790644340a5 100644 --- a/drivers/usb/core/generic.c +++ b/drivers/usb/core/generic.c @@ -205,8 +205,9 @@ static int __check_usb_generic(struct device_driver *drv, void *data) udrv = to_usb_device_driver(drv); if (udrv == &usb_generic_driver) return 0; - - return usb_device_match_id(udev, udrv->id_table) != NULL; + if (usb_device_match_id(udev, udrv->id_table) != NULL) + return 1; + return (udrv->match && udrv->match(udev)); } static bool usb_generic_driver_match(struct usb_device *udev) diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 4dc443aaef5c624526624b178b57e73318efd3cc..ec0d6c50610cef42f8c34ba6f815668761b6a70e 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -315,11 +315,14 @@ EXPORT_SYMBOL_GPL(usb_hcd_pci_probe); void usb_hcd_pci_remove(struct pci_dev *dev) { struct usb_hcd *hcd; + int hcd_driver_flags; hcd = pci_get_drvdata(dev); if (!hcd) return; + hcd_driver_flags = hcd->driver->flags; + if (pci_dev_run_wake(dev)) pm_runtime_get_noresume(&dev->dev); @@ -347,7 +350,7 @@ void usb_hcd_pci_remove(struct pci_dev *dev) up_read(&companions_rwsem); } usb_put_hcd(hcd); - if ((hcd->driver->flags & HCD_MASK) < HCD_USB3) + if ((hcd_driver_flags & HCD_MASK) < HCD_USB3) pci_free_irq_vectors(dev); pci_disable_device(dev); } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 052d5accfe9b2bc077e0b877efdce60a2aafd95b..5b768b80d1eeb56190bb581ac561f80f730e97f0 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -727,7 +727,7 @@ static void hub_irq(struct urb *urb) if ((++hub->nerrors < 10) || hub->error) goto resubmit; hub->error = status; - /* FALL THROUGH */ + fallthrough; /* let hub_wq handle things */ case 0: /* we got data: port status changed */ diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 6197938dcc2d8f104efbd8e923bbc6e3d0b94c7b..ae1de9cc4b094cad6cfe00c0c58d28a4a81b4dd4 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1205,6 +1205,34 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface 
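usb_register_device_driver() above now walks the bus so devices sitting on the generic driver can be handed to the newly registered specialised driver. A sketch of the per-device callback for bus_for_each_dev(); the two matching checks are hypothetical stand-ins for the id-table and match() tests in the hunk:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/usb.h>

static bool bound_to_generic_driver(struct device *dev);        /* hypothetical */
static bool matches_new_driver(struct device *dev,
                               struct usb_device_driver *drv);  /* hypothetical */

static int reprobe_candidate(struct device *dev, void *data)
{
        struct usb_device_driver *new_udriver = data;

        if (!bound_to_generic_driver(dev))
                return 0;
        if (!matches_new_driver(dev, new_udriver))
                return 0;
        if (device_reprobe(dev) == -EPROBE_DEFER)
                return 0;               /* deferred probing retries later */
        return 0;                       /* keep walking either way */
}

/* usage: bus_for_each_dev(&usb_bus_type, NULL, new_udriver, reprobe_candidate); */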
*intf, } } +/* + * usb_disable_device_endpoints -- Disable all endpoints for a device + * @dev: the device whose endpoints are being disabled + * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. + */ +static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0) +{ + struct usb_hcd *hcd = bus_to_hcd(dev->bus); + int i; + + if (hcd->driver->check_bandwidth) { + /* First pass: Cancel URBs, leave endpoint pointers intact. */ + for (i = skip_ep0; i < 16; ++i) { + usb_disable_endpoint(dev, i, false); + usb_disable_endpoint(dev, i + USB_DIR_IN, false); + } + /* Remove endpoints from the host controller internal state */ + mutex_lock(hcd->bandwidth_mutex); + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + mutex_unlock(hcd->bandwidth_mutex); + } + /* Second pass: remove endpoint pointers */ + for (i = skip_ep0; i < 16; ++i) { + usb_disable_endpoint(dev, i, true); + usb_disable_endpoint(dev, i + USB_DIR_IN, true); + } +} + /** * usb_disable_device - Disable all the endpoints for a USB device * @dev: the device whose endpoints are being disabled @@ -1218,7 +1246,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, void usb_disable_device(struct usb_device *dev, int skip_ep0) { int i; - struct usb_hcd *hcd = bus_to_hcd(dev->bus); /* getting rid of interfaces will disconnect * any drivers bound to them (a key side effect) @@ -1264,22 +1291,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, skip_ep0 ? "non-ep0" : "all"); - if (hcd->driver->check_bandwidth) { - /* First pass: Cancel URBs, leave endpoint pointers intact. */ - for (i = skip_ep0; i < 16; ++i) { - usb_disable_endpoint(dev, i, false); - usb_disable_endpoint(dev, i + USB_DIR_IN, false); - } - /* Remove endpoints from the host controller internal state */ - mutex_lock(hcd->bandwidth_mutex); - usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); - mutex_unlock(hcd->bandwidth_mutex); - /* Second pass: remove endpoint pointers */ - } - for (i = skip_ep0; i < 16; ++i) { - usb_disable_endpoint(dev, i, true); - usb_disable_endpoint(dev, i + USB_DIR_IN, true); - } + + usb_disable_device_endpoints(dev, skip_ep0); } /** @@ -1522,6 +1535,9 @@ EXPORT_SYMBOL_GPL(usb_set_interface); * The caller must own the device lock. * * Return: Zero on success, else a negative error code. + * + * If this routine fails the device will probably be in an unusable state + * with endpoints disabled, and interfaces only partially enabled. 
*/ int usb_reset_configuration(struct usb_device *dev) { @@ -1537,10 +1553,7 @@ int usb_reset_configuration(struct usb_device *dev) * calls during probe() are fine */ - for (i = 1; i < 16; ++i) { - usb_disable_endpoint(dev, i, true); - usb_disable_endpoint(dev, i + USB_DIR_IN, true); - } + usb_disable_device_endpoints(dev, 1); /* skip ep0*/ config = dev->actconfig; retval = 0; @@ -1553,34 +1566,10 @@ int usb_reset_configuration(struct usb_device *dev) mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } - /* Make sure we have enough bandwidth for each alternate setting 0 */ - for (i = 0; i < config->desc.bNumInterfaces; i++) { - struct usb_interface *intf = config->interface[i]; - struct usb_host_interface *alt; - alt = usb_altnum_to_altsetting(intf, 0); - if (!alt) - alt = &intf->altsetting[0]; - if (alt != intf->cur_altsetting) - retval = usb_hcd_alloc_bandwidth(dev, NULL, - intf->cur_altsetting, alt); - if (retval < 0) - break; - } - /* If not, reinstate the old alternate settings */ + /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */ + retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL); if (retval < 0) { -reset_old_alts: - for (i--; i >= 0; i--) { - struct usb_interface *intf = config->interface[i]; - struct usb_host_interface *alt; - - alt = usb_altnum_to_altsetting(intf, 0); - if (!alt) - alt = &intf->altsetting[0]; - if (alt != intf->cur_altsetting) - usb_hcd_alloc_bandwidth(dev, NULL, - alt, intf->cur_altsetting); - } usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; @@ -1589,8 +1578,12 @@ int usb_reset_configuration(struct usb_device *dev) USB_REQ_SET_CONFIGURATION, 0, config->desc.bConfigurationValue, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); - if (retval < 0) - goto reset_old_alts; + if (retval < 0) { + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + usb_enable_lpm(dev); + mutex_unlock(hcd->bandwidth_mutex); + return retval; + } mutex_unlock(hcd->bandwidth_mutex); /* re-init hc/hcd interface/endpoint state */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 7c1198f80c2316e48acdcbdefe9a3b64bcb5d096..f232914de5fd49f9b392d33d5719c8f1c7d66067 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -370,6 +370,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0926, 0x0202), .driver_info = USB_QUIRK_ENDPOINT_IGNORE }, + /* Sound Devices MixPre-D */ + { USB_DEVICE(0x0926, 0x0208), .driver_info = + USB_QUIRK_ENDPOINT_IGNORE }, + /* Keytouch QWERTY Panel keyboard */ { USB_DEVICE(0x0926, 0x3333), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, @@ -465,6 +469,8 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM }, + { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM }, + /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, @@ -509,6 +515,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { */ static const struct usb_device_id usb_endpoint_ignore[] = { { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 }, + { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 }, { } }; diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index a2ca38e25e0c33ab6a2c7aace36573068e3d9e36..8d134193fa0cf0ff7da568e5ee7f7b4bbdb30598 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -889,7 +889,11 @@ read_descriptors(struct file *filp, struct kobject *kobj, size_t srclen, n; int cfgno; void *src; + int retval; + retval = 
usb_lock_device_interruptible(udev); + if (retval < 0) + return -EINTR; /* The binary attribute begins with the device descriptor. * Following that are the raw descriptor entries for all the * configurations (config plus subsidiary descriptors). @@ -914,6 +918,7 @@ read_descriptors(struct file *filp, struct kobject *kobj, off -= srclen; } } + usb_unlock_device(udev); return count - nleft; } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 422aea24afcd1ffbc8cccdb802acb81632601c23..2eb34c8b4065f5939fac1ebd57b32ecc2870d1c2 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -646,9 +646,8 @@ static int dwc3_phy_setup(struct dwc3 *dwc) if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI)) break; } - /* FALLTHROUGH */ + fallthrough; case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: - /* FALLTHROUGH */ default: break; } @@ -1411,7 +1410,7 @@ static void dwc3_check_params(struct dwc3 *dwc) default: dev_err(dev, "invalid maximum_speed parameter %d\n", dwc->maximum_speed); - /* fall through */ + fallthrough; case USB_SPEED_UNKNOWN: /* default to superspeed */ dwc->maximum_speed = USB_SPEED_SUPER; diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c index 88b75b5a039c949ba63b195ff80acc34a43649e6..1f7f4d88ed9d8d1227fde3ce460b3bfe4fc5abf6 100644 --- a/drivers/usb/dwc3/dwc3-meson-g12a.c +++ b/drivers/usb/dwc3/dwc3-meson-g12a.c @@ -737,13 +737,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev) goto err_disable_clks; } - ret = reset_control_deassert(priv->reset); + ret = reset_control_reset(priv->reset); if (ret) - goto err_assert_reset; + goto err_disable_clks; ret = dwc3_meson_g12a_get_phys(priv); if (ret) - goto err_assert_reset; + goto err_disable_clks; ret = priv->drvdata->setup_regmaps(priv, base); if (ret) @@ -752,7 +752,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev) if (priv->vbus) { ret = regulator_enable(priv->vbus); if (ret) - goto err_assert_reset; + goto err_disable_clks; } /* Get dr_mode */ @@ -765,13 +765,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev) ret = priv->drvdata->usb_init(priv); if (ret) - goto err_assert_reset; + goto err_disable_clks; /* Init PHYs */ for (i = 0 ; i < PHY_COUNT ; ++i) { ret = phy_init(priv->phys[i]); if (ret) - goto err_assert_reset; + goto err_disable_clks; } /* Set PHY Power */ @@ -809,9 +809,6 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev) for (i = 0 ; i < PHY_COUNT ; ++i) phy_exit(priv->phys[i]); -err_assert_reset: - reset_control_assert(priv->reset); - err_disable_clks: clk_bulk_disable_unprepare(priv->drvdata->num_clks, priv->drvdata->clks); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index e44bfc3b5096d987dd74f9ff0c14a273862c6568..c2a0f64f8d1e12e7b949ae8b6978a3a02aee5338 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1054,27 +1054,25 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, * dwc3_prepare_one_trb - setup one TRB from one request * @dep: endpoint for which this request is prepared * @req: dwc3_request pointer + * @trb_length: buffer size of the TRB * @chain: should this TRB be chained to the next? * @node: only for isochronous endpoints. First TRB needs different type. 
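The read_descriptors() hunk just above brackets the raw-descriptor copy with usb_lock_device_interruptible()/usb_unlock_device(), so the descriptors cannot change under a concurrent reconfiguration mid-read. The locking shape, with copy_descriptors() as a hypothetical body:

#include <linux/usb.h>

static ssize_t copy_descriptors(struct usb_device *udev, char *buf,
                                loff_t off, size_t count);      /* hypothetical */

static ssize_t locked_read(struct usb_device *udev, char *buf,
                           loff_t off, size_t count)
{
        ssize_t ret;

        if (usb_lock_device_interruptible(udev) < 0)
                return -EINTR;          /* interrupted while waiting */

        ret = copy_descriptors(udev, buf, off, count);

        usb_unlock_device(udev);
        return ret;
}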
*/ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, - struct dwc3_request *req, unsigned chain, unsigned node) + struct dwc3_request *req, unsigned int trb_length, + unsigned chain, unsigned node) { struct dwc3_trb *trb; - unsigned int length; dma_addr_t dma; unsigned stream_id = req->request.stream_id; unsigned short_not_ok = req->request.short_not_ok; unsigned no_interrupt = req->request.no_interrupt; unsigned is_last = req->request.is_last; - if (req->request.num_sgs > 0) { - length = sg_dma_len(req->start_sg); + if (req->request.num_sgs > 0) dma = sg_dma_address(req->start_sg); - } else { - length = req->request.length; + else dma = req->request.dma; - } trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1086,7 +1084,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, req->num_trbs++; - __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node, + __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node, stream_id, short_not_ok, no_interrupt, is_last); } @@ -1096,16 +1094,27 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, struct scatterlist *sg = req->start_sg; struct scatterlist *s; int i; - + unsigned int length = req->request.length; unsigned int remaining = req->request.num_mapped_sgs - req->num_queued_sgs; + /* + * If we resume preparing the request, then get the remaining length of + * the request and resume where we left off. + */ + for_each_sg(req->request.sg, s, req->num_queued_sgs, i) + length -= sg_dma_len(s); + for_each_sg(sg, s, remaining, i) { - unsigned int length = req->request.length; unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); unsigned int rem = length % maxp; + unsigned int trb_length; unsigned chain = true; + trb_length = min_t(unsigned int, length, sg_dma_len(s)); + + length -= trb_length; + /* * IOMMU driver is coalescing the list of sgs which shares a * page boundary into one and giving it to USB driver. With * this the number of sgs mapped is not equal to the number of * sgs passed. So mark the chain bit to false if it is the last * mapped sg. 
*/ - if (i == remaining - 1) + if ((i == remaining - 1) || !length) chain = false; if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { @@ -1123,7 +1132,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, i); + dwc3_prepare_one_trb(dep, req, trb_length, true, i); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1134,8 +1143,39 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->request.short_not_ok, req->request.no_interrupt, req->request.is_last); + } else if (req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && + !rem && !chain) { + struct dwc3 *dwc = dep->dwc; + struct dwc3_trb *trb; + + req->needs_extra_trb = true; + + /* Prepare normal TRB */ + dwc3_prepare_one_trb(dep, req, trb_length, true, i); + + /* Prepare one extra TRB to handle ZLP */ + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, + !req->direction, 1, + req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt, + req->request.is_last); + + /* Prepare one more TRB to handle MPS alignment */ + if (!req->direction) { + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp, + false, 1, req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt, + req->request.is_last); + } } else { - dwc3_prepare_one_trb(dep, req, chain, i); + dwc3_prepare_one_trb(dep, req, trb_length, chain, i); } /* @@ -1150,6 +1190,16 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->num_queued_sgs++; + /* + * The number of pending SG entries may not correspond to the + * number of mapped SG entries. If all the data are queued, then + * don't include unused SG entries. 
+ */ + if (length == 0) { + req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs; + break; + } + if (!dwc3_calc_trbs_left(dep)) break; } @@ -1169,7 +1219,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, 0); + dwc3_prepare_one_trb(dep, req, length, true, 0); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1180,6 +1230,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->request.no_interrupt, req->request.is_last); } else if (req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && (IS_ALIGNED(req->request.length, maxp))) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; @@ -1187,18 +1238,29 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, 0); + dwc3_prepare_one_trb(dep, req, length, true, 0); - /* Now prepare one extra TRB to handle ZLP */ + /* Prepare one extra TRB to handle ZLP */ trb = &dep->trb_pool[dep->trb_enqueue]; req->num_trbs++; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, - false, 1, req->request.stream_id, + !req->direction, 1, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt, req->request.is_last); + + /* Prepare one more TRB to handle MPS alignment for OUT */ + if (!req->direction) { + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp, + false, 1, req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt, + req->request.is_last); + } } else { - dwc3_prepare_one_trb(dep, req, false, 0); + dwc3_prepare_one_trb(dep, req, length, false, 0); } } @@ -2671,8 +2733,17 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, status); if (req->needs_extra_trb) { + unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); + ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); + + /* Reclaim MPS padding TRB for ZLP */ + if (!req->direction && req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && + (IS_ALIGNED(req->request.length, maxp))) + ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); + req->needs_extra_trb = false; } diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 331c951d72dc994ab4f16f4ee9d1c095f046dae6..950c9435beec376d7ca81c5e110af83d47ada626 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -2039,7 +2039,6 @@ static int do_scsi_command(struct fsg_common *common) case RELEASE: case RESERVE: case SEND_DIAGNOSTIC: - fallthrough; default: unknown_cmnd: diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 1d900081b1f0ca5e5e01b24d701d1cd7a1459a9f..b4206b0dede54e99ef34263f937aaa9584c41130 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1181,12 +1181,15 @@ static int ncm_unwrap_ntb(struct gether *port, int ndp_index; unsigned dg_len, dg_len2; unsigned ndp_len; + unsigned block_len; struct sk_buff *skb2; int ret = -EINVAL; - unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); + unsigned ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); + unsigned frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize); const struct 
ndp_parser_opts *opts = ncm->parser_opts; unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0; int dgram_counter; + bool ndp_after_header; /* dwSignature */ if (get_unaligned_le32(tmp) != opts->nth_sign) { @@ -1205,25 +1208,37 @@ static int ncm_unwrap_ntb(struct gether *port, } tmp++; /* skip wSequence */ + block_len = get_ncm(&tmp, opts->block_length); /* (d)wBlockLength */ - if (get_ncm(&tmp, opts->block_length) > max_size) { + if (block_len > ntb_max) { INFO(port->func.config->cdev, "OUT size exceeded\n"); goto err; } ndp_index = get_ncm(&tmp, opts->ndp_index); + ndp_after_header = false; /* Run through all the NDP's in the NTB */ do { - /* NCM 3.2 */ - if (((ndp_index % 4) != 0) && - (ndp_index < opts->nth_size)) { + /* + * NCM 3.2 + * dwNdpIndex + */ + if (((ndp_index % 4) != 0) || + (ndp_index < opts->nth_size) || + (ndp_index > (block_len - + opts->ndp_size))) { INFO(port->func.config->cdev, "Bad index: %#X\n", ndp_index); goto err; } + if (ndp_index == opts->nth_size) + ndp_after_header = true; - /* walk through NDP */ + /* + * walk through NDP + * dwSignature + */ tmp = (void *)(skb->data + ndp_index); if (get_unaligned_le32(tmp) != ncm->ndp_sign) { INFO(port->func.config->cdev, "Wrong NDP SIGN\n"); @@ -1234,14 +1249,15 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_len = get_unaligned_le16(tmp++); /* * NCM 3.3.1 + * wLength * entry is 2 items * item size is 16/32 bits, opts->dgram_item_len * 2 bytes * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry * Each entry is a dgram index and a dgram length. */ if ((ndp_len < opts->ndp_size - + 2 * 2 * (opts->dgram_item_len * 2)) - || (ndp_len % opts->ndplen_align != 0)) { + + 2 * 2 * (opts->dgram_item_len * 2)) || + (ndp_len % opts->ndplen_align != 0)) { INFO(port->func.config->cdev, "Bad NDP length: %#X\n", ndp_len); goto err; @@ -1258,8 +1274,21 @@ static int ncm_unwrap_ntb(struct gether *port, do { index = index2; + /* wDatagramIndex[0] */ + if ((index < opts->nth_size) || + (index > block_len - opts->dpe_size)) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index); + goto err; + } + dg_len = dg_len2; - if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */ + /* + * wDatagramLength[0] + * ethernet hdr + crc or larger than max frame size + */ + if ((dg_len < 14 + crc_len) || + (dg_len > frame_max)) { INFO(port->func.config->cdev, "Bad dgram length: %#X\n", dg_len); goto err; @@ -1283,6 +1312,37 @@ static int ncm_unwrap_ntb(struct gether *port, index2 = get_ncm(&tmp, opts->dgram_item_len); dg_len2 = get_ncm(&tmp, opts->dgram_item_len); + if (index2 == 0 || dg_len2 == 0) + break; + + /* wDatagramIndex[1] */ + if (ndp_after_header) { + if (index2 < opts->nth_size + opts->ndp_size) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index2); + goto err; + } + } else { + if (index2 < opts->nth_size + opts->dpe_size) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index2); + goto err; + } + } + if (index2 > block_len - opts->dpe_size) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index2); + goto err; + } + + /* wDatagramLength[1] */ + if ((dg_len2 < 14 + crc_len) || + (dg_len2 > frame_max)) { + INFO(port->func.config->cdev, + "Bad dgram length: %#X\n", dg_len); + goto err; + } + /* * Copy the data into a new skb. 
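/*
 * [Editor's sketch -- a plain-C model of the new validation with
 * hypothetical parameter names; the real checks live in ncm_unwrap_ntb().]
 * Every offset parsed out of the NTB is now checked against the declared
 * block length before it is dereferenced, so a malicious (d)wNdpIndex or
 * wDatagramIndex can no longer point outside the received buffer.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ndp_index_ok(uint32_t ndp_index, uint32_t nth_size,
			 uint32_t ndp_size, uint32_t block_len)
{
	return block_len >= ndp_size &&		  /* block can hold an NDP */
	       (ndp_index % 4) == 0 &&		  /* NCM 3.2: 4-byte aligned */
	       ndp_index >= nth_size &&		  /* past the NTB header */
	       ndp_index <= block_len - ndp_size; /* NDP fits in the block */
}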
* This ensures the truesize is correct @@ -1299,9 +1359,6 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_len -= 2 * (opts->dgram_item_len * 2); dgram_counter++; - - if (index2 == 0 || dg_len2 == 0) - break; } while (ndp_len > 2 * (opts->dgram_item_len * 2)); } while (ndp_index); diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index d94b814328c8bfb447e14947e5b4e34a97d4f31f..184165e27908ed91afb4003cd021582e8726c382 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -753,12 +753,13 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream) goto err_sts; return 0; + err_sts: - usb_ep_free_request(fu->ep_status, stream->req_status); - stream->req_status = NULL; -err_out: usb_ep_free_request(fu->ep_out, stream->req_out); stream->req_out = NULL; +err_out: + usb_ep_free_request(fu->ep_in, stream->req_in); + stream->req_in = NULL; out: return -ENOMEM; } diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h index eaa13fd3dc7f36bd4f3953817037ff3cb818f463..e313c3b8dcb19a2f48dd1a089d14469b1c5490c3 100644 --- a/drivers/usb/gadget/u_f.h +++ b/drivers/usb/gadget/u_f.h @@ -14,6 +14,7 @@ #define __U_F_H__ #include +#include /* Variable Length Array Macros **********************************************/ #define vla_group(groupname) size_t groupname##__next = 0 @@ -21,21 +22,36 @@ #define vla_item(groupname, type, name, n) \ size_t groupname##_##name##__offset = ({ \ - size_t align_mask = __alignof__(type) - 1; \ - size_t offset = (groupname##__next + align_mask) & ~align_mask;\ - size_t size = (n) * sizeof(type); \ - groupname##__next = offset + size; \ + size_t offset = 0; \ + if (groupname##__next != SIZE_MAX) { \ + size_t align_mask = __alignof__(type) - 1; \ + size_t size = array_size(n, sizeof(type)); \ + offset = (groupname##__next + align_mask) & \ + ~align_mask; \ + if (check_add_overflow(offset, size, \ + &groupname##__next)) { \ + groupname##__next = SIZE_MAX; \ + offset = 0; \ + } \ + } \ offset; \ }) #define vla_item_with_sz(groupname, type, name, n) \ - size_t groupname##_##name##__sz = (n) * sizeof(type); \ - size_t groupname##_##name##__offset = ({ \ - size_t align_mask = __alignof__(type) - 1; \ - size_t offset = (groupname##__next + align_mask) & ~align_mask;\ - size_t size = groupname##_##name##__sz; \ - groupname##__next = offset + size; \ - offset; \ + size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \ + size_t groupname##_##name##__offset = ({ \ + size_t offset = 0; \ + if (groupname##__next != SIZE_MAX) { \ + size_t align_mask = __alignof__(type) - 1; \ + offset = (groupname##__next + align_mask) & \ + ~align_mask; \ + if (check_add_overflow(offset, groupname##_##name##__sz,\ + &groupname##__next)) { \ + groupname##__next = SIZE_MAX; \ + offset = 0; \ + } \ + } \ + offset; \ }) #define vla_ptr(ptr, groupname, name) \ diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index fa6793065c7c986e47ebf9335afa7a9004615617..a6426dd1cfefe296b9e8472ae6e9da68abd0c06d 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -328,7 +328,7 @@ static int usba_config_fifo_table(struct usba_udc *udc) switch (fifo_mode) { default: fifo_mode = 0; - /* fall through */ + fallthrough; case 0: udc->fifo_cfg = NULL; n = 0; diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index 
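/*
 * [Editor's sketch -- the same saturating-offset idea as the reworked
 * vla_item() macros, in plain C using the GCC/Clang builtin that the
 * kernel's check_add_overflow() wraps; `align` is assumed a power of two.]
 * The running group size is pinned to SIZE_MAX as soon as any
 * multiplication or addition overflows, so a later allocation of the whole
 * group fails cleanly instead of being silently undersized.
 */
#include <stddef.h>
#include <stdint.h>

static size_t vla_reserve(size_t *next, size_t align, size_t n, size_t elem)
{
	size_t offset, size, end;

	if (*next == SIZE_MAX)			/* group already poisoned */
		return 0;
	if (n && elem > SIZE_MAX / n)		/* n * elem would overflow */
		goto poison;
	size = n * elem;
	offset = (*next + (align - 1)) & ~(align - 1);
	if (__builtin_add_overflow(offset, size, &end))
		goto poison;
	*next = end;
	return offset;
poison:
	*next = SIZE_MAX;
	return 0;
}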
b2638e83bb49898dc10706cdda9d5c10e706b803..a6f7b2594c090b72439120193f57e4de8581e423 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -250,7 +250,7 @@ static int dr_controller_setup(struct fsl_udc *udc) break; case FSL_USB2_PHY_UTMI_WIDE: portctrl |= PORTSCX_PTW_16BIT; - /* fall through */ + fallthrough; case FSL_USB2_PHY_UTMI: case FSL_USB2_PHY_UTMI_DUAL: if (udc->pdata->have_sysif_regs) { diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c index cfafdd92c2a841d7c425b52a9bb8b8e921fcd5aa..10324a7334fe170ae2cd7fbd21c62ade14d75538 100644 --- a/drivers/usb/gadget/udc/pxa25x_udc.c +++ b/drivers/usb/gadget/udc/pxa25x_udc.c @@ -2340,12 +2340,12 @@ static int pxa25x_udc_probe(struct platform_device *pdev) case PXA250_A0: case PXA250_A1: /* A0/A1 "not released"; ep 13, 15 unusable */ - /* fall through */ + fallthrough; case PXA250_B2: case PXA210_B2: case PXA250_B1: case PXA210_B1: case PXA250_B0: case PXA210_B0: /* OUT-DMA is broken ... */ - /* fall through */ + fallthrough; case PXA250_C0: case PXA210_C0: break; #elif defined(CONFIG_ARCH_IXP4XX) diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index a87c0b26279e71e4492ae377f7652b60dc0df40d..3055d9abfec302c5c931025b5bfa5b2c0d1afc7b 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c @@ -1019,7 +1019,7 @@ static int isp116x_hub_control(struct usb_hcd *hcd, spin_lock_irqsave(&isp116x->lock, flags); isp116x_write_reg32(isp116x, HCRHSTATUS, RH_HS_OCIC); spin_unlock_irqrestore(&isp116x->lock, flags); - /* fall through */ + fallthrough; case C_HUB_LOCAL_POWER: DBG("C_HUB_LOCAL_POWER\n"); break; @@ -1421,10 +1421,10 @@ static int isp116x_bus_suspend(struct usb_hcd *hcd) isp116x_write_reg32(isp116x, HCCONTROL, (val & ~HCCONTROL_HCFS) | HCCONTROL_USB_RESET); - /* fall through */ + fallthrough; case HCCONTROL_USB_RESET: ret = -EBUSY; - /* fall through */ + fallthrough; default: /* HCCONTROL_USB_SUSPEND */ spin_unlock_irqrestore(&isp116x->lock, flags); break; diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c index bd40e597f25663ddc515ce04020835b580aaf194..5f5e8a64c8e2e01761fbf710c298d6e2fe2cc831 100644 --- a/drivers/usb/host/ohci-exynos.c +++ b/drivers/usb/host/ohci-exynos.c @@ -171,9 +171,8 @@ static int exynos_ohci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(&pdev->dev, "Failed to get IRQ\n"); - err = -ENODEV; + if (irq < 0) { + err = irq; goto fail_io; } diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index b8961c0381cfd4fe87eca833834dfd65d0eac786..8c1bbac6d1366d5555873e80cd0a85c780f544ea 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -957,7 +957,8 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev) ehci_bios_handoff(pdev, op_reg_base, cap, offset); break; case 0: /* Illegal reserved cap, set cap=0 so we exit */ - cap = 0; /* fall through */ + cap = 0; + fallthrough; default: dev_warn(&pdev->dev, "EHCI: unrecognized capability %02x\n", diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index fcc5ac5ce8b10b7b61fe6937d4b4d1505950d46d..ccb0156fcebebf2afd72ad4945f5444ad03eda66 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -699,7 +699,7 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event) switch (comp_code) { case COMP_SUCCESS: remain_length = 0; - /* 
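/*
 * [Editor's note -- the return-value contract behind the ohci-exynos fix;
 * kernel-context sketch, assuming the usual probe() locals.]
 * platform_get_irq() returns the IRQ number on success and a negative
 * errno on failure (possibly -EPROBE_DEFER); it is not expected to return
 * 0. Testing `!irq` therefore misses real errors, and rewriting the errno
 * as -ENODEV would break probe deferral:
 *
 *	irq = platform_get_irq(pdev, 0);
 *	if (irq < 0) {
 *		err = irq;	// propagate, don't rewrite, the errno
 *		goto fail_io;
 *	}
 */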
FALLTHROUGH */ + fallthrough; case COMP_SHORT_PACKET: status = 0; break; diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 92e25a62fdb5b7fc8c76656345ab62b0841a95ef..c88bffd68742de8a259482dde93a5b2956507e1b 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -274,7 +274,7 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused) static int xhci_endpoint_context_show(struct seq_file *s, void *unused) { - int dci; + int ep_index; dma_addr_t dma; struct xhci_hcd *xhci; struct xhci_ep_ctx *ep_ctx; @@ -283,9 +283,9 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused) xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus)); - for (dci = 1; dci < 32; dci++) { - ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci); - dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params); + for (ep_index = 0; ep_index < 31; ep_index++) { + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); + dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params); seq_printf(s, "%pad: %s\n", &dma, xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info), le32_to_cpu(ep_ctx->ep_info2), diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index c3554e37e09f3db3e936bbdf53360125ceea9b54..c799ca5361d4d9c96a695c4540946150fa6c81f5 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -740,15 +740,6 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, { u32 pls = status_reg & PORT_PLS_MASK; - /* resume state is a xHCI internal state. - * Do not report it to usb core, instead, pretend to be U3, - * thus usb core knows it's not ready for transfer - */ - if (pls == XDEV_RESUME) { - *status |= USB_SS_PORT_LS_U3; - return; - } - /* When the CAS bit is set then warm reset * should be performed on port */ @@ -770,6 +761,16 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, */ pls |= USB_PORT_STAT_CONNECTION; } else { + /* + * Resume state is an xHCI internal state. Do not report it to + * usb core, instead, pretend to be U3, thus usb core knows + * it's not ready for transfer. + */ + if (pls == XDEV_RESUME) { + *status |= USB_SS_PORT_LS_U3; + return; + } + /* * If CAS bit isn't set but the Port is already at * Compliance Mode, fake a connection so the USB core @@ -1483,7 +1484,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, break; case USB_PORT_FEAT_C_SUSPEND: bus_state->port_c_suspend &= ~(1 << wIndex); - /* fall through */ + fallthrough; case USB_PORT_FEAT_C_RESET: case USB_PORT_FEAT_C_BH_PORT_RESET: case USB_PORT_FEAT_C_CONNECTION: diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 696fad50b478b22564d917c5dcfc17a553e48bbb..fe405cd38dbc11790b5fd91e41d78f8136c306e9 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1311,7 +1311,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, interval = xhci_parse_microframe_interval(udev, ep); break; } - /* Fall through - SS and HS isoc/int have same decoding */ + fallthrough; /* SS and HS isoc/int have same decoding */ case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: @@ -1331,7 +1331,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, * since it uses the same rules as low speed interrupt * endpoints. 
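/*
 * [Editor's sketch -- the index arithmetic behind the xhci-debugfs hunk.]
 * xHCI device contexts are addressed by Device Context Index (DCI): DCI 0
 * is the slot context and endpoints occupy DCI 1..31. The driver's
 * ep_index is zero-based, hence the +1 when computing the context offset:
 */
#include <stdint.h>

static uint64_t ep_ctx_dma(uint64_t out_ctx_dma, unsigned int ep_index,
			   unsigned int ctx_size)
{
	/* DCI = ep_index + 1; the slot context sits at DCI 0 */
	return out_ctx_dma + (uint64_t)(ep_index + 1) * ctx_size;
}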
*/ - /* fall through */ + fallthrough; case USB_SPEED_LOW: if (usb_endpoint_xfer_int(&ep->desc) || diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c index 59b1965ad0a3f1f17c3d7d50306f60240eec331d..f97ac9f52bf4d9cc15b244e067e87e918847a249 100644 --- a/drivers/usb/host/xhci-pci-renesas.c +++ b/drivers/usb/host/xhci-pci-renesas.c @@ -50,20 +50,6 @@ #define RENESAS_RETRY 10000 #define RENESAS_DELAY 10 -#define ROM_VALID_01 0x2013 -#define ROM_VALID_02 0x2026 - -static int renesas_verify_fw_version(struct pci_dev *pdev, u32 version) -{ - switch (version) { - case ROM_VALID_01: - case ROM_VALID_02: - return 0; - } - dev_err(&pdev->dev, "FW has invalid version :%d\n", version); - return -EINVAL; -} - static int renesas_fw_download_image(struct pci_dev *dev, const u32 *fw, size_t step, bool rom) { @@ -202,10 +188,7 @@ static int renesas_check_rom_state(struct pci_dev *pdev) version &= RENESAS_FW_VERSION_FIELD; version = version >> RENESAS_FW_VERSION_OFFSET; - - err = renesas_verify_fw_version(pdev, version); - if (err) - return err; + dev_dbg(&pdev->dev, "Found ROM version: %x\n", version); /* * Test if ROM is present and loaded, if so we can skip everything diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 2c255d0620b054d789e6a5b8a044205debd4bbab..a741a38a4c69072ccb10fbb37693304bcc915e80 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2103,7 +2103,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, break; xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", trb_comp_code, ep_index); - /* else fall through */ + fallthrough; case COMP_STALL_ERROR: /* Did we transfer part of the data (middle) phase? */ if (trb_type == TRB_DATA || trb_type == TRB_NORMAL) diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 014d79334f50e1de0390c287b8144a4e0f583fb4..190923d8b246ee7c65436908bbebd1d6279f19c3 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1136,7 +1136,7 @@ static struct phy *tegra_xusb_get_phy(struct tegra_xusb *tegra, char *name, unsigned int i, phy_count = 0; for (i = 0; i < tegra->soc->num_types; i++) { - if (!strncmp(tegra->soc->phy_types[i].name, "usb2", + if (!strncmp(tegra->soc->phy_types[i].name, name, strlen(name))) return tegra->phys[phy_count+port]; @@ -1258,6 +1258,8 @@ static int tegra_xusb_init_usb_phy(struct tegra_xusb *tegra) INIT_WORK(&tegra->id_work, tegra_xhci_id_work); tegra->id_nb.notifier_call = tegra_xhci_id_notify; + tegra->otg_usb2_port = -EINVAL; + tegra->otg_usb3_port = -EINVAL; for (i = 0; i < tegra->num_usb_phys; i++) { struct phy *phy = tegra_xusb_get_phy(tegra, "usb2", i); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 3c41b14ecce720280bb1c37b8493407aff73dd6a..f4cedcaee14b36b42e25b5367d949721a2d2ef9e 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3236,10 +3236,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, wait_for_completion(cfg_cmd->completion); - ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; xhci_free_command(xhci, cfg_cmd); cleanup: xhci_free_command(xhci, stop_cmd); + if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) + ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; } static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, @@ -4618,7 +4619,7 @@ static unsigned long long xhci_calculate_intel_u1_timeout( break; } /* Otherwise the calculation is the same as isoc eps */ - /* fall through */ + fallthrough; case USB_ENDPOINT_XFER_ISOC: timeout_ns 
= xhci_service_interval_to_ns(desc); timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c index 407fe7570f3bc228e27548cd1bfc9c78b82cde0b..f8686139d6f392bd51ee59309faaf466514eb043 100644 --- a/drivers/usb/misc/lvstest.c +++ b/drivers/usb/misc/lvstest.c @@ -426,7 +426,7 @@ static int lvs_rh_probe(struct usb_interface *intf, USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT); if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) { dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret); - return ret; + return ret < 0 ? ret : -EINVAL; } /* submit urb to poll interrupt endpoint */ diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 6e7d34e7fec4338c479d570dd977e01f51049f17..b2e09883c7e2a20e0a319bee894b1ab9389198d0 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -492,7 +492,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE); dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__, dev->cntl_buffer[0]); - retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL); + retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC); if (retval >= 0) timeout = schedule_timeout(YUREX_WRITE_TIMEOUT); finish_wait(&dev->waitq, &wait); diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index c545b27ea56819bd255442d4cd400a069333a426..edb5b63d70634d631e9a473947ff910adc162296 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c @@ -975,7 +975,7 @@ static int cppi_channel_program(struct dma_channel *ch, musb_dbg(musb, "%cX DMA%d not allocated!", cppi_ch->transmit ? 'T' : 'R', cppi_ch->index); - /* FALLTHROUGH */ + fallthrough; case MUSB_DMA_STATUS_FREE: break; } diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 5a56a03996b1df9d68d3a1b5d59492256e8b52a9..849e0b770130a6bf08033bc38f8d655f56f5c259 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -852,7 +852,7 @@ static void musb_handle_intr_suspend(struct musb *musb, u8 devctl) case OTG_STATE_B_IDLE: if (!musb->is_active) break; - /* fall through */ + fallthrough; case OTG_STATE_B_PERIPHERAL: musb_g_suspend(musb); musb->is_active = musb->g.b_hnp_enable; @@ -972,9 +972,8 @@ static void musb_handle_intr_disconnect(struct musb *musb, u8 devctl) case OTG_STATE_A_PERIPHERAL: musb_hnp_stop(musb); musb_root_disconnect(musb); - /* FALLTHROUGH */ + fallthrough; case OTG_STATE_B_WAIT_ACON: - /* FALLTHROUGH */ case OTG_STATE_B_PERIPHERAL: case OTG_STATE_B_IDLE: musb_g_disconnect(musb); @@ -1009,7 +1008,7 @@ static void musb_handle_intr_reset(struct musb *musb) switch (musb->xceiv->otg->state) { case OTG_STATE_A_SUSPEND: musb_g_reset(musb); - /* FALLTHROUGH */ + fallthrough; case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */ /* never use invalid T(a_wait_bcon) */ musb_dbg(musb, "HNP: in %s, %d msec timeout", @@ -1030,7 +1029,7 @@ static void musb_handle_intr_reset(struct musb *musb) break; case OTG_STATE_B_IDLE: musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; - /* FALLTHROUGH */ + fallthrough; case OTG_STATE_B_PERIPHERAL: musb_g_reset(musb); break; @@ -1471,7 +1470,7 @@ static int ep_config_from_table(struct musb *musb) switch (fifo_mode) { default: fifo_mode = 0; - /* FALLTHROUGH */ + fallthrough; case 0: cfg = mode_0_cfg; n = ARRAY_SIZE(mode_0_cfg); @@ -2018,7 +2017,7 @@ static void musb_pm_runtime_check_session(struct musb *musb) musb->quirk_retries--; return; } - /* fall through */ + fallthrough; case 
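/*
 * [Editor's note -- pattern behind the yurex GFP_ATOMIC change, not a new
 * hunk.]
 * Between prepare_to_wait(..., TASK_INTERRUPTIBLE) and schedule_timeout()
 * the task is no longer in TASK_RUNNING, so anything that might sleep --
 * such as a GFP_KERNEL allocation inside usb_submit_urb() -- is invalid
 * there and clobbers the wait state; GFP_ATOMIC keeps the submission
 * non-blocking:
 *
 *	prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
 *	retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC); // must not sleep
 *	if (retval >= 0)
 *		timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
 *	finish_wait(&dev->waitq, &wait);
 */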
MUSB_QUIRK_A_DISCONNECT_19: if (musb->quirk_retries && !musb->flush_irq_work) { musb_dbg(musb, diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 19556c1a8ae878b8f49a2bf6f972dc343420bb2e..30085b2be7b90bf132641e0e726e57b4d5048e8d 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -232,7 +232,7 @@ static int dsps_check_status(struct musb *musb, void *unused) dsps_mod_timer_optional(glue); break; } - /* fall through */ + fallthrough; case OTG_STATE_A_WAIT_BCON: /* keep VBUS on for host-only mode */ @@ -242,7 +242,7 @@ static int dsps_check_status(struct musb *musb, void *unused) } musb_writeb(musb->mregs, MUSB_DEVCTL, 0); skip_session = 1; - /* fall through */ + fallthrough; case OTG_STATE_A_IDLE: case OTG_STATE_B_IDLE: @@ -793,7 +793,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, case USB_SPEED_SUPER: dev_warn(dev, "ignore incorrect maximum_speed " "(super-speed) setting in dts"); - /* fall through */ + fallthrough; default: config->maximum_speed = USB_SPEED_HIGH; } diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 0ae3e0be043ed2496f8000b3de4d33e7441fcefb..44d3cb02fa76a8b802242a425504a0b7d5823aaa 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c @@ -735,7 +735,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb) musb_writeb(mbase, MUSB_TESTMODE, musb->test_mode_nr); } - /* FALLTHROUGH */ + fallthrough; case MUSB_EP0_STAGE_STATUSOUT: /* end of sequence #1: write to host (TX state) */ @@ -767,7 +767,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb) */ retval = IRQ_HANDLED; musb->ep0_state = MUSB_EP0_STAGE_SETUP; - /* FALLTHROUGH */ + fallthrough; case MUSB_EP0_STAGE_SETUP: setup: diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 8b7d22a0c0fba0b8ef00e1fef2674a69c71e41e5..30c5e7de0761c5aa2681eb6050cb14c9fe99ba61 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -360,7 +360,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, qh = first_qh(head); break; } - /* fall through */ + fallthrough; case USB_ENDPOINT_XFER_ISOC: case USB_ENDPOINT_XFER_INT: @@ -1019,7 +1019,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) musb->ep0_stage = MUSB_EP0_OUT; more = true; } - /* FALLTHROUGH */ + fallthrough; case MUSB_EP0_OUT: fifo_count = min_t(size_t, qh->maxpacket, urb->transfer_buffer_length - @@ -2222,7 +2222,7 @@ static int musb_urb_enqueue( interval = max_t(u8, epd->bInterval, 1); break; } - /* FALLTHROUGH */ + fallthrough; case USB_ENDPOINT_XFER_ISOC: /* ISO always uses logarithmic encoding */ interval = min_t(u8, epd->bInterval, 16); diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index cb7ae297a3af009376c7b45357fc8ad0750e38f2..cafc69536e1d787679a14d7c3d74b97da9e34b08 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -211,7 +211,7 @@ void musb_root_disconnect(struct musb *musb) musb->g.is_a_peripheral = 1; break; } - /* FALLTHROUGH */ + fallthrough; case OTG_STATE_A_HOST: musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; musb->is_active = 0; diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index d62c78b97cadb3f0947c90a535a539550973cd59..4232f1ce3fbfaac8626d3f349c2bec4cd2cb2dc7 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -104,7 +104,7 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) if (error) break; 
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; - /* Fall through */ + fallthrough; case OTG_STATE_A_WAIT_VRISE: case OTG_STATE_A_WAIT_BCON: case OTG_STATE_A_HOST: diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 99890d1bbfcb54dbbbba7e7c9f0a9c6792cf0222..c26683a2702b65ad542588cb16cad5cd7e186137 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -464,7 +464,7 @@ static void musb_do_idle(struct timer_list *t) dev_dbg(musb->controller, "Nothing connected %s, turning off VBUS\n", usb_otg_state_string(musb->xceiv->otg->state)); } - /* FALLTHROUGH */ + fallthrough; case OTG_STATE_A_IDLE: tusb_musb_set_vbus(musb, 0); default: diff --git a/drivers/usb/phy/phy-jz4770.c b/drivers/usb/phy/phy-jz4770.c index d4ee3cb721ea8cac0352d0aab0342fa9a7190460..f6d3731581ebdeac4a53c2a1f5b611115896ff32 100644 --- a/drivers/usb/phy/phy-jz4770.c +++ b/drivers/usb/phy/phy-jz4770.c @@ -176,6 +176,7 @@ static int ingenic_usb_phy_init(struct usb_phy *phy) /* Wait for PHY to reset */ usleep_range(30, 300); + reg = readl(priv->base + REG_USBPCR_OFFSET); writel(reg & ~USBPCR_POR, priv->base + REG_USBPCR_OFFSET); usleep_range(300, 1000); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 871cdccf3a5f1c6ec69d30ad0bd0bd2fa33f3af5..9823bb424abd9abe465c788ef97ab5ff51c74766 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -713,6 +713,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index e8373528264c3634a9e474f2836a82d2b5b8b7a5..b5ca17a5967a0d6cbefbc925208b65a331b82f5e 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -160,6 +160,7 @@ #define XSENS_AWINDA_DONGLE_PID 0x0102 #define XSENS_MTW_PID 0x0200 /* Xsens MTw */ #define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */ +#define XSENS_MTIUSBCONVERTER_PID 0x0301 /* MTi USB converter */ #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ /* Xsens devices using FTDI VID */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 89b3192af3269b5acc80fd8df7e3727585852c50..0c6f160a214ab1ef18d781c8d019e706376b361f 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1094,14 +1094,18 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), .driver_info = RSVD(1) | RSVD(3) }, /* Quectel products using Quectel vendor ID */ - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), - .driver_info = RSVD(4) }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), - .driver_info = RSVD(4) }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95), - .driver_info = RSVD(4) }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), - .driver_info = RSVD(4) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff), + 
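/*
 * [Editor's note -- the read-modify-write pattern the phy-jz4770 fix
 * restores; kernel-context sketch.]
 * Clearing a single bit in an MMIO register must start from the register's
 * current value; reusing a stale local copy (here, one that still has the
 * POR bit set from the earlier reset-assert write) silently writes back
 * out-of-date contents:
 *
 *	reg = readl(priv->base + REG_USBPCR_OFFSET);	// fresh value
 *	writel(reg & ~USBPCR_POR, priv->base + REG_USBPCR_OFFSET);
 */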
.driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, @@ -1819,6 +1823,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */ .driver_info = RSVD(7) }, + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */ + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c index c8a988d2cfdd357b2c6ea4c469410d2887717fec..15dc25801cdcc52f292313db6d0f5e9e81329d14 100644 --- a/drivers/usb/storage/sddr55.c +++ b/drivers/usb/storage/sddr55.c @@ -592,7 +592,7 @@ static unsigned long sddr55_get_capacity(struct us_data *us) { case 0x64: info->pageshift = 8; info->smallpageshift = 1; - /* fall through */ + fallthrough; case 0x5d: // 5d is a ROM card with pagesize 512. 
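/*
 * [Editor's note -- match-table pattern shown with the EC25 IDs above;
 * NUMEP2 is an option.c-private flag, here understood to restrict binding
 * to interfaces with exactly two endpoints.]
 * Splitting one USB_DEVICE() entry into USB_DEVICE_AND_INTERFACE_INFO()
 * entries lets the driver bind by interface signature instead of
 * blacklisting interface numbers with RSVD(n): the ff/ff/ff vendor
 * interfaces are treated as serial ports, ff/00/00 interfaces match
 * without extra quirks. Kernel-context sketch:
 */
static const struct usb_device_id example_ids[] = {
	{ USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0125, 0xff, 0xff, 0xff),
	  .driver_info = NUMEP2 },
	{ USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0125, 0xff, 0, 0) },
	{ }	/* terminator */
};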
return 0x00200000; diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index d592071119ba6b84275dbe19916b8eaa20e24c82..08f9296431e9af243c0c36de2870d7a8d961691c 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -688,7 +688,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, break; case DMA_BIDIRECTIONAL: cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB; - /* fall through */ + fallthrough; case DMA_TO_DEVICE: cmdinfo->state |= ALLOC_DATA_OUT_URB | SUBMIT_DATA_OUT_URB; case DMA_NONE: diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 220ae2c356eee84d5fa869af9491f3a261967667..5732e9691f08f2f8a3c2661a3feaa46f6dcbbac6 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2328,7 +2328,7 @@ UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114, "JMicron", "USB to ATA/ATAPI Bridge", USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_BROKEN_FUA ), + US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ), /* Reported by Andrey Rahmatullin */ UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 162b09d69f62ff5b6e5e4529b190afffa3937de8..711ab240058c7d3ef8eebc62240befe19cf71f2c 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -28,6 +28,13 @@ * and don't forget to CC: the USB development list */ +/* Reported-by: Till Dörges */ +UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999, + "Sony", + "PSZ-HA*", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + /* Reported-by: Julian Groß */ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, "LaCie", @@ -80,6 +87,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA), +/* Reported-by: Thinh Nguyen */ +UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999, + "PNY", + "Pro Elite SSD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* Reported-by: Hans de Goede */ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VIA", diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index e4021e13af40a3b3b440b9eed1fa53887a513fa1..ec7da0fa3cf8c6c54b430d9edeb97a7c47e567af 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -61,14 +61,11 @@ enum { #define PMC_USB_ALTMODE_ORI_SHIFT 1 #define PMC_USB_ALTMODE_UFP_SHIFT 3 -#define PMC_USB_ALTMODE_ORI_AUX_SHIFT 4 -#define PMC_USB_ALTMODE_ORI_HSL_SHIFT 5 /* DP specific Mode Data bits */ #define PMC_USB_ALTMODE_DP_MODE_SHIFT 8 /* TBT specific Mode Data bits */ -#define PMC_USB_ALTMODE_HPD_HIGH BIT(14) #define PMC_USB_ALTMODE_TBT_TYPE BIT(17) #define PMC_USB_ALTMODE_CABLE_TYPE BIT(18) #define PMC_USB_ALTMODE_ACTIVE_LINK BIT(20) @@ -179,15 +176,9 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state) req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT; req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT; - req.mode_data |= sbu_orientation(port) << PMC_USB_ALTMODE_ORI_AUX_SHIFT; - req.mode_data |= hsl_orientation(port) << PMC_USB_ALTMODE_ORI_HSL_SHIFT; - req.mode_data |= (state->mode - TYPEC_STATE_MODAL) << PMC_USB_ALTMODE_DP_MODE_SHIFT; - if (data->status & DP_STATUS_HPD_STATE) - req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH; - ret = pmc_usb_command(port, (void *)&req, sizeof(req)); if (ret) return ret; @@ -212,9 +203,6 @@ pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state) req.mode_data = (port->orientation - 1) << 
PMC_USB_ALTMODE_ORI_SHIFT; req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT; - req.mode_data |= sbu_orientation(port) << PMC_USB_ALTMODE_ORI_AUX_SHIFT; - req.mode_data |= hsl_orientation(port) << PMC_USB_ALTMODE_ORI_HSL_SHIFT; - if (TBT_ADAPTER(data->device_mode) == TBT_ADAPTER_TBT3) req.mode_data |= PMC_USB_ALTMODE_TBT_TYPE; @@ -497,6 +485,7 @@ static int pmc_usb_probe(struct platform_device *pdev) for (i = 0; i < pmc->num_ports; i++) { typec_switch_unregister(pmc->port[i].typec_sw); typec_mux_unregister(pmc->port[i].typec_mux); + usb_role_switch_unregister(pmc->port[i].usb_sw); } return ret; @@ -510,6 +499,7 @@ static int pmc_usb_remove(struct platform_device *pdev) for (i = 0; i < pmc->num_ports; i++) { typec_switch_unregister(pmc->port[i].typec_sw); typec_mux_unregister(pmc->port[i].typec_mux); + usb_role_switch_unregister(pmc->port[i].usb_sw); } return 0; diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index f57d91fd0e092454fc5df9dc807a3a2859f9bb65..bd80e03b2b6f806c6a68c8c30d7089e11ffdb064 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -157,7 +157,7 @@ static enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink) case 0x3: if (sink) return TYPEC_CC_RP_3_0; - /* fall through */ + fallthrough; case 0x0: default: return TYPEC_CC_OPEN; diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 3ef37202ee3757597b900dec25b34b823f7424b9..a48e3f90d19610c2dcf577b3c974b54b78ea319a 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -3372,13 +3372,31 @@ static void run_state_machine(struct tcpm_port *port) tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0); break; case SRC_HARD_RESET_VBUS_OFF: - tcpm_set_vconn(port, true); + /* + * 7.1.5 Response to Hard Resets + * Hard Reset Signaling indicates a communication failure has occurred and the + * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall + * drive VBUS to vSafe0V as shown in Figure 7-9. + */ + tcpm_set_vconn(port, false); tcpm_set_vbus(port, false); tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE, tcpm_data_role_for_source(port)); - tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER); + /* + * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V + + * PD_T_SRC_RECOVER before turning vbus back on. + * From Table 7-12 Sequence Description for a Source Initiated Hard Reset: + * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then + * tells the Device Policy Manager to instruct the power supply to perform a + * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2). + * 5. After tSrcRecover the Source applies power to VBUS in an attempt to + * re-establish communication with the Sink and resume USB Default Operation. + * The transition to vSafe5V Shall occur within tSrcTurnOn(t4). + */ + tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER); break; case SRC_HARD_RESET_VBUS_ON: + tcpm_set_vconn(port, true); tcpm_set_vbus(port, true); port->tcpc->set_pd_rx(port->tcpc, true); tcpm_set_attached_state(port, true); @@ -3944,7 +3962,11 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port) tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0); break; case SRC_HARD_RESET_VBUS_OFF: - tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0); + /* + * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait + * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V. 
+ */ + tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER); break; case HARD_RESET_SEND: break; diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c index 048381c058a5bfe4dd14aa4a118bc128d30509ef..261131c9e37c63a05b0f52adad5ed10bfae80553 100644 --- a/drivers/usb/typec/ucsi/displayport.c +++ b/drivers/usb/typec/ucsi/displayport.c @@ -288,8 +288,6 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con, struct typec_altmode *alt; struct ucsi_dp *dp; - mutex_lock(&con->lock); - /* We can't rely on the firmware with the capabilities. */ desc->vdo |= DP_CAP_DP_SIGNALING | DP_CAP_RECEPTACLE; @@ -298,15 +296,12 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con, desc->vdo |= all_assignments << 16; alt = typec_port_register_altmode(con->port, desc); - if (IS_ERR(alt)) { - mutex_unlock(&con->lock); + if (IS_ERR(alt)) return alt; - } dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL); if (!dp) { typec_unregister_altmode(alt); - mutex_unlock(&con->lock); return ERR_PTR(-ENOMEM); } @@ -319,7 +314,5 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con, alt->ops = &ucsi_displayport_ops; typec_altmode_set_drvdata(alt, dp); - mutex_unlock(&con->lock); - return alt; } diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index affd024190c9116cafb112e7bbc5c7d690cfdf0a..e680fcfdee609a98eaf63fb7a9b350310e1e0227 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -146,40 +146,33 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) return UCSI_CCI_LENGTH(cci); } -static int ucsi_run_command(struct ucsi *ucsi, u64 command, - void *data, size_t size) +int ucsi_send_command(struct ucsi *ucsi, u64 command, + void *data, size_t size) { u8 length; int ret; + mutex_lock(&ucsi->ppm_lock); + ret = ucsi_exec_command(ucsi, command); if (ret < 0) - return ret; + goto out; length = ret; if (data) { ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size); if (ret) - return ret; + goto out; } ret = ucsi_acknowledge_command(ucsi); if (ret) - return ret; - - return length; -} - -int ucsi_send_command(struct ucsi *ucsi, u64 command, - void *retval, size_t size) -{ - int ret; + goto out; - mutex_lock(&ucsi->ppm_lock); - ret = ucsi_run_command(ucsi, command, retval, size); + ret = length; +out: mutex_unlock(&ucsi->ppm_lock); - return ret; } EXPORT_SYMBOL_GPL(ucsi_send_command); @@ -205,7 +198,7 @@ void ucsi_altmode_update_active(struct ucsi_connector *con) int i; command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num); - ret = ucsi_run_command(con->ucsi, command, &cur, sizeof(cur)); + ret = ucsi_send_command(con->ucsi, command, &cur, sizeof(cur)); if (ret < 0) { if (con->ucsi->version > 0x0100) { dev_err(con->ucsi->dev, @@ -354,7 +347,7 @@ ucsi_register_altmodes_nvidia(struct ucsi_connector *con, u8 recipient) command |= UCSI_GET_ALTMODE_RECIPIENT(recipient); command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num); command |= UCSI_GET_ALTMODE_OFFSET(i); - len = ucsi_run_command(con->ucsi, command, &alt, sizeof(alt)); + len = ucsi_send_command(con->ucsi, command, &alt, sizeof(alt)); /* * We are collecting all altmodes first and then registering. 
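/*
 * [Editor's sketch -- the two timer paths introduced by the tcpm change.]
 * After a source-initiated Hard Reset, VBUS must reach vSafe0V within
 * tSafe0V, and may only be re-applied (together with VCONN) tSrcRecover
 * after that point. If the TCPC does signal vbus-off, only tSrcRecover
 * remains; if it never does, the state machine conservatively waits the
 * sum of both budgets before re-enabling VBUS:
 */
#include <stdbool.h>

static unsigned int hard_reset_recover_ms(bool vbus_off_seen,
					  unsigned int t_safe_0v,
					  unsigned int t_src_recover)
{
	return vbus_off_seen ? t_src_recover : t_safe_0v + t_src_recover;
}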
* Some type-C device will return zero length data beyond last @@ -431,7 +424,7 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient) command |= UCSI_GET_ALTMODE_RECIPIENT(recipient); command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num); command |= UCSI_GET_ALTMODE_OFFSET(i); - len = ucsi_run_command(con->ucsi, command, alt, sizeof(alt)); + len = ucsi_send_command(con->ucsi, command, alt, sizeof(alt)); if (len <= 0) return len; @@ -502,7 +495,7 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner) command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner); command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1); command |= UCSI_GET_PDOS_SRC_PDOS; - ret = ucsi_run_command(ucsi, command, con->src_pdos, + ret = ucsi_send_command(ucsi, command, con->src_pdos, sizeof(con->src_pdos)); if (ret < 0) { dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret); @@ -681,7 +674,7 @@ static void ucsi_handle_connector_change(struct work_struct *work) */ command = UCSI_GET_CAM_SUPPORTED; command |= UCSI_CONNECTOR_NUMBER(con->num); - ucsi_run_command(con->ucsi, command, NULL, 0); + ucsi_send_command(con->ucsi, command, NULL, 0); } if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE) @@ -736,20 +729,24 @@ static int ucsi_reset_ppm(struct ucsi *ucsi) u32 cci; int ret; + mutex_lock(&ucsi->ppm_lock); + ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command, sizeof(command)); if (ret < 0) - return ret; + goto out; tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS); do { - if (time_is_before_jiffies(tmo)) - return -ETIMEDOUT; + if (time_is_before_jiffies(tmo)) { + ret = -ETIMEDOUT; + goto out; + } ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci)); if (ret) - return ret; + goto out; /* If the PPM is still doing something else, reset it again. */ if (cci & ~UCSI_CCI_RESET_COMPLETE) { @@ -757,13 +754,15 @@ static int ucsi_reset_ppm(struct ucsi *ucsi) &command, sizeof(command)); if (ret < 0) - return ret; + goto out; } msleep(20); } while (!(cci & UCSI_CCI_RESET_COMPLETE)); - return 0; +out: + mutex_unlock(&ucsi->ppm_lock); + return ret; } static int ucsi_role_cmd(struct ucsi_connector *con, u64 command) @@ -775,9 +774,7 @@ static int ucsi_role_cmd(struct ucsi_connector *con, u64 command) u64 c; /* PPM most likely stopped responding. Resetting everything. 
*/ - mutex_lock(&con->ucsi->ppm_lock); ucsi_reset_ppm(con->ucsi); - mutex_unlock(&con->ucsi->ppm_lock); c = UCSI_SET_NOTIFICATION_ENABLE | con->ucsi->ntfy; ucsi_send_command(con->ucsi, c, NULL, 0); @@ -901,12 +898,15 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) con->num = index + 1; con->ucsi = ucsi; + /* Delay other interactions with the con until registration is complete */ + mutex_lock(&con->lock); + /* Get connector capability */ command = UCSI_GET_CONNECTOR_CAPABILITY; command |= UCSI_CONNECTOR_NUMBER(con->num); - ret = ucsi_run_command(ucsi, command, &con->cap, sizeof(con->cap)); + ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap)); if (ret < 0) - return ret; + goto out; if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP) cap->data = TYPEC_PORT_DRD; @@ -938,27 +938,32 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) ret = ucsi_register_port_psy(con); if (ret) - return ret; + goto out; /* Register the connector */ con->port = typec_register_port(ucsi->dev, cap); - if (IS_ERR(con->port)) - return PTR_ERR(con->port); + if (IS_ERR(con->port)) { + ret = PTR_ERR(con->port); + goto out; + } /* Alternate modes */ ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_CON); - if (ret) + if (ret) { dev_err(ucsi->dev, "con%d: failed to register alt modes\n", con->num); + goto out; + } /* Get the status */ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num); - ret = ucsi_run_command(ucsi, command, &con->status, - sizeof(con->status)); + ret = ucsi_send_command(ucsi, command, &con->status, sizeof(con->status)); if (ret < 0) { dev_err(ucsi->dev, "con%d: failed to get status\n", con->num); - return 0; + ret = 0; + goto out; } + ret = 0; /* ucsi_send_command() returns length on success */ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) { case UCSI_CONSTAT_PARTNER_TYPE_UFP: @@ -983,17 +988,21 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) if (con->partner) { ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP); - if (ret) + if (ret) { dev_err(ucsi->dev, "con%d: failed to register alternate modes\n", con->num); - else + ret = 0; + } else { ucsi_altmode_update_active(con); + } } trace_ucsi_register_port(con->num, &con->status); - return 0; +out: + mutex_unlock(&con->lock); + return ret; } /** @@ -1009,8 +1018,6 @@ static int ucsi_init(struct ucsi *ucsi) int ret; int i; - mutex_lock(&ucsi->ppm_lock); - /* Reset the PPM */ ret = ucsi_reset_ppm(ucsi); if (ret) { @@ -1021,13 +1028,13 @@ static int ucsi_init(struct ucsi *ucsi) /* Enable basic notifications */ ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR; command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy; - ret = ucsi_run_command(ucsi, command, NULL, 0); + ret = ucsi_send_command(ucsi, command, NULL, 0); if (ret < 0) goto err_reset; /* Get PPM capabilities */ command = UCSI_GET_CAPABILITY; - ret = ucsi_run_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap)); + ret = ucsi_send_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap)); if (ret < 0) goto err_reset; @@ -1054,12 +1061,10 @@ static int ucsi_init(struct ucsi *ucsi) /* Enable all notifications */ ucsi->ntfy = UCSI_ENABLE_NTFY_ALL; command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy; - ret = ucsi_run_command(ucsi, command, NULL, 0); + ret = ucsi_send_command(ucsi, command, NULL, 0); if (ret < 0) goto err_unregister; - mutex_unlock(&ucsi->ppm_lock); - return 0; err_unregister: @@ -1074,8 +1079,6 @@ static int ucsi_init(struct ucsi *ucsi) err_reset: ucsi_reset_ppm(ucsi); err: - 
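/*
 * [Editor's note -- the locking shape after the UCSI refactor, simplified.]
 * ucsi_run_command() is folded into ucsi_send_command(), which now owns
 * ppm_lock for the whole execute/read/acknowledge sequence, and
 * ucsi_reset_ppm() takes the same lock itself; callers therefore never
 * wrap these helpers in ppm_lock any more:
 *
 *	ucsi_send_command():  lock ppm_lock -> exec -> read MESSAGE_IN ->
 *	                      acknowledge -> unlock
 *	ucsi_reset_ppm():     lock ppm_lock -> reset/poll loop -> unlock
 *	ucsi_register_port(): holds con->lock for the whole registration
 *	                      instead, keeping other users off the connector
 *	                      until it is fully set up
 */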
mutex_unlock(&ucsi->ppm_lock); - return ret; } diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index 9fc4f338e8700ec069d9154cf77ee8ef9880201f..c0aca2f0f23f07f0f8c345474bf4709e19cf4e9c 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -112,11 +112,15 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data) static int ucsi_acpi_probe(struct platform_device *pdev) { + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); struct ucsi_acpi *ua; struct resource *res; acpi_status status; int ret; + if (adev->dep_unmet) + return -EPROBE_DEFER; + ua = devm_kzalloc(&pdev->dev, sizeof(*ua), GFP_KERNEL); if (!ua) return -ENOMEM; diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 2305d425e6c9adbc8ebdbcf4db45770da3710c0d..9d7d642022d1f65b637a150ae1296a8af75eb017 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -461,6 +461,11 @@ static void stub_disconnect(struct usb_device *udev) return; } +static bool usbip_match(struct usb_device *udev) +{ + return true; +} + #ifdef CONFIG_PM /* These functions need usb_port_suspend and usb_port_resume, @@ -486,6 +491,7 @@ struct usb_device_driver stub_driver = { .name = "usbip-host", .probe = stub_probe, .disconnect = stub_disconnect, + .match = usbip_match, #ifdef CONFIG_PM .suspend = stub_suspend, .resume = stub_resume, diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index 08f267a2aafec962fc89f1f6d3cd0b2a26bc478d..64696d63fe0770221341505201e432bd2bda0130 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -84,7 +84,7 @@ struct ifcvf_hw { void __iomem * const *base; char config_msix_name[256]; struct vdpa_callback config_cb; - + unsigned int config_irq; }; struct ifcvf_adapter { diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index 076d7ac5e723f9f2b227e631ea88526badcdb8f7..8b4028556cb66e42f8f13d294bc88800c2ed673c 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -55,6 +55,7 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues) vf->vring[i].irq = -EINVAL; } + devm_free_irq(&pdev->dev, vf->config_irq, vf); ifcvf_free_irq_vectors(pdev); } @@ -74,10 +75,14 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter) snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n", pci_name(pdev)); vector = 0; - irq = pci_irq_vector(pdev, vector); - ret = devm_request_irq(&pdev->dev, irq, + vf->config_irq = pci_irq_vector(pdev, vector); + ret = devm_request_irq(&pdev->dev, vf->config_irq, ifcvf_config_changed, 0, vf->config_msix_name, vf); + if (ret) { + IFCVF_ERR(pdev, "Failed to request config irq\n"); + return ret; + } for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) { snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 9df69d5efe8c7fef4b319be9c24a51464b3d0913..70676a6d16914a377934dca3b3d9b429e56c7b32 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -16,19 +16,19 @@ #define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev) #define VALID_FEATURES_MASK \ - (BIT(VIRTIO_NET_F_CSUM) | BIT(VIRTIO_NET_F_GUEST_CSUM) | \ - BIT(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | BIT(VIRTIO_NET_F_MTU) | BIT(VIRTIO_NET_F_MAC) | \ - BIT(VIRTIO_NET_F_GUEST_TSO4) | BIT(VIRTIO_NET_F_GUEST_TSO6) | \ - BIT(VIRTIO_NET_F_GUEST_ECN) | 
BIT(VIRTIO_NET_F_GUEST_UFO) | BIT(VIRTIO_NET_F_HOST_TSO4) | \ - BIT(VIRTIO_NET_F_HOST_TSO6) | BIT(VIRTIO_NET_F_HOST_ECN) | BIT(VIRTIO_NET_F_HOST_UFO) | \ - BIT(VIRTIO_NET_F_MRG_RXBUF) | BIT(VIRTIO_NET_F_STATUS) | BIT(VIRTIO_NET_F_CTRL_VQ) | \ - BIT(VIRTIO_NET_F_CTRL_RX) | BIT(VIRTIO_NET_F_CTRL_VLAN) | \ - BIT(VIRTIO_NET_F_CTRL_RX_EXTRA) | BIT(VIRTIO_NET_F_GUEST_ANNOUNCE) | \ - BIT(VIRTIO_NET_F_MQ) | BIT(VIRTIO_NET_F_CTRL_MAC_ADDR) | BIT(VIRTIO_NET_F_HASH_REPORT) | \ - BIT(VIRTIO_NET_F_RSS) | BIT(VIRTIO_NET_F_RSC_EXT) | BIT(VIRTIO_NET_F_STANDBY) | \ - BIT(VIRTIO_NET_F_SPEED_DUPLEX) | BIT(VIRTIO_F_NOTIFY_ON_EMPTY) | \ - BIT(VIRTIO_F_ANY_LAYOUT) | BIT(VIRTIO_F_VERSION_1) | BIT(VIRTIO_F_ACCESS_PLATFORM) | \ - BIT(VIRTIO_F_RING_PACKED) | BIT(VIRTIO_F_ORDER_PLATFORM) | BIT(VIRTIO_F_SR_IOV)) + (BIT_ULL(VIRTIO_NET_F_CSUM) | BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) | \ + BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_MAC) | \ + BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) | BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) | \ + BIT_ULL(VIRTIO_NET_F_GUEST_ECN) | BIT_ULL(VIRTIO_NET_F_GUEST_UFO) | BIT_ULL(VIRTIO_NET_F_HOST_TSO4) | \ + BIT_ULL(VIRTIO_NET_F_HOST_TSO6) | BIT_ULL(VIRTIO_NET_F_HOST_ECN) | BIT_ULL(VIRTIO_NET_F_HOST_UFO) | \ + BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) | BIT_ULL(VIRTIO_NET_F_STATUS) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ) | \ + BIT_ULL(VIRTIO_NET_F_CTRL_RX) | BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) | \ + BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) | BIT_ULL(VIRTIO_NET_F_GUEST_ANNOUNCE) | \ + BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | BIT_ULL(VIRTIO_NET_F_HASH_REPORT) | \ + BIT_ULL(VIRTIO_NET_F_RSS) | BIT_ULL(VIRTIO_NET_F_RSC_EXT) | BIT_ULL(VIRTIO_NET_F_STANDBY) | \ + BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX) | BIT_ULL(VIRTIO_F_NOTIFY_ON_EMPTY) | \ + BIT_ULL(VIRTIO_F_ANY_LAYOUT) | BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_ACCESS_PLATFORM) | \ + BIT_ULL(VIRTIO_F_RING_PACKED) | BIT_ULL(VIRTIO_F_ORDER_PLATFORM) | BIT_ULL(VIRTIO_F_SR_IOV)) #define VALID_STATUS_MASK \ (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \ @@ -149,7 +149,7 @@ static bool mlx5_vdpa_debug; #define MLX5_LOG_VIO_FLAG(_feature) \ do { \ - if (features & BIT(_feature)) \ + if (features & BIT_ULL(_feature)) \ mlx5_vdpa_info(mvdev, "%s\n", #_feature); \ } while (0) @@ -750,10 +750,10 @@ static bool vq_is_tx(u16 idx) static u16 get_features_12_3(u64 features) { - return (!!(features & BIT(VIRTIO_NET_F_HOST_TSO4)) << 9) | - (!!(features & BIT(VIRTIO_NET_F_HOST_TSO6)) << 8) | - (!!(features & BIT(VIRTIO_NET_F_CSUM)) << 7) | - (!!(features & BIT(VIRTIO_NET_F_GUEST_CSUM)) << 6); + return (!!(features & BIT_ULL(VIRTIO_NET_F_HOST_TSO4)) << 9) | + (!!(features & BIT_ULL(VIRTIO_NET_F_HOST_TSO6)) << 8) | + (!!(features & BIT_ULL(VIRTIO_NET_F_CSUM)) << 7) | + (!!(features & BIT_ULL(VIRTIO_NET_F_GUEST_CSUM)) << 6); } static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) @@ -1439,13 +1439,13 @@ static u64 mlx_to_vritio_features(u16 dev_features) u64 result = 0; if (dev_features & MLX5_VIRTIO_NET_F_GUEST_CSUM) - result |= BIT(VIRTIO_NET_F_GUEST_CSUM); + result |= BIT_ULL(VIRTIO_NET_F_GUEST_CSUM); if (dev_features & MLX5_VIRTIO_NET_F_CSUM) - result |= BIT(VIRTIO_NET_F_CSUM); + result |= BIT_ULL(VIRTIO_NET_F_CSUM); if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO6) - result |= BIT(VIRTIO_NET_F_HOST_TSO6); + result |= BIT_ULL(VIRTIO_NET_F_HOST_TSO6); if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO4) - result |= BIT(VIRTIO_NET_F_HOST_TSO4); + result |= 
BIT_ULL(VIRTIO_NET_F_HOST_TSO4); return result; } @@ -1459,15 +1459,15 @@ static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev) dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask); ndev->mvdev.mlx_features = mlx_to_vritio_features(dev_features); if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0)) - ndev->mvdev.mlx_features |= BIT(VIRTIO_F_VERSION_1); - ndev->mvdev.mlx_features |= BIT(VIRTIO_F_ACCESS_PLATFORM); + ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1); + ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM); print_features(mvdev, ndev->mvdev.mlx_features, false); return ndev->mvdev.mlx_features; } static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features) { - if (!(features & BIT(VIRTIO_F_ACCESS_PLATFORM))) + if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM))) return -EOPNOTSUPP; return 0; diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 620465c2a1da4569c5ae503d820e8efc982aa26f..1ab1f5cda4ac27431f5a512c60598560e95137d1 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -990,7 +990,7 @@ static long vfio_pci_ioctl(void *device_data, case VFIO_PCI_ERR_IRQ_INDEX: if (pci_is_pcie(vdev->pdev)) break; - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index 86a02aff8735ffa2a7223821971e76d47157ccd5..61ca8ab165dc16f930296c7fd20263cfe9680b17 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -33,12 +33,14 @@ struct vfio_pci_ioeventfd { struct list_head next; + struct vfio_pci_device *vdev; struct virqfd *virqfd; void __iomem *addr; uint64_t data; loff_t pos; int bar; int count; + bool test_mem; }; struct vfio_pci_irq_ctx { diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 916b184df3a5b52637c00779a7a883cf0fbcdf77..9e353c484ace2676b5fa51f0c07f0b50208cd04e 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -37,17 +37,70 @@ #define vfio_ioread8 ioread8 #define vfio_iowrite8 iowrite8 +#define VFIO_IOWRITE(size) \ +static int vfio_pci_iowrite##size(struct vfio_pci_device *vdev, \ + bool test_mem, u##size val, void __iomem *io) \ +{ \ + if (test_mem) { \ + down_read(&vdev->memory_lock); \ + if (!__vfio_pci_memory_enabled(vdev)) { \ + up_read(&vdev->memory_lock); \ + return -EIO; \ + } \ + } \ + \ + vfio_iowrite##size(val, io); \ + \ + if (test_mem) \ + up_read(&vdev->memory_lock); \ + \ + return 0; \ +} + +VFIO_IOWRITE(8) +VFIO_IOWRITE(16) +VFIO_IOWRITE(32) +#ifdef iowrite64 +VFIO_IOWRITE(64) +#endif + +#define VFIO_IOREAD(size) \ +static int vfio_pci_ioread##size(struct vfio_pci_device *vdev, \ + bool test_mem, u##size *val, void __iomem *io) \ +{ \ + if (test_mem) { \ + down_read(&vdev->memory_lock); \ + if (!__vfio_pci_memory_enabled(vdev)) { \ + up_read(&vdev->memory_lock); \ + return -EIO; \ + } \ + } \ + \ + *val = vfio_ioread##size(io); \ + \ + if (test_mem) \ + up_read(&vdev->memory_lock); \ + \ + return 0; \ +} + +VFIO_IOREAD(8) +VFIO_IOREAD(16) +VFIO_IOREAD(32) + /* * Read or write from an __iomem region (MMIO or I/O port) with an excluded * range which is inaccessible. The excluded range drops writes and fills * reads with -1. This is intended for handling MSI-X vector tables and * leftover space for ROM BARs. 
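The VFIO_IOWRITE()/VFIO_IOREAD() generators above stamp out one accessor per access width so the memory-enable check is written exactly once. A minimal userspace sketch of the same token-pasting pattern, with a pthread rwlock standing in for memory_lock and all names invented here:

#include <stdint.h>
#include <pthread.h>

struct dev_ctx {
	pthread_rwlock_t lock;	/* stands in for vdev->memory_lock */
	int mem_enabled;	/* stands in for the command-register memory bit */
};

#define DEFINE_CHECKED_WRITE(size)					\
static int checked_write##size(struct dev_ctx *d, int test_mem,	\
			       uint##size##_t val,			\
			       volatile uint##size##_t *io)		\
{									\
	if (test_mem) {							\
		pthread_rwlock_rdlock(&d->lock);			\
		if (!d->mem_enabled) {					\
			pthread_rwlock_unlock(&d->lock);		\
			return -1;	/* mirrors the -EIO above */	\
		}							\
	}								\
	*io = val;							\
	if (test_mem)							\
		pthread_rwlock_unlock(&d->lock);			\
	return 0;							\
}

DEFINE_CHECKED_WRITE(8)
DEFINE_CHECKED_WRITE(16)
DEFINE_CHECKED_WRITE(32)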
*/ -static ssize_t do_io_rw(void __iomem *io, char __user *buf, +static ssize_t do_io_rw(struct vfio_pci_device *vdev, bool test_mem, + void __iomem *io, char __user *buf, loff_t off, size_t count, size_t x_start, size_t x_end, bool iswrite) { ssize_t done = 0; + int ret; while (count) { size_t fillable, filled; @@ -66,9 +119,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf, if (copy_from_user(&val, buf, 4)) return -EFAULT; - vfio_iowrite32(val, io + off); + ret = vfio_pci_iowrite32(vdev, test_mem, + val, io + off); + if (ret) + return ret; } else { - val = vfio_ioread32(io + off); + ret = vfio_pci_ioread32(vdev, test_mem, + &val, io + off); + if (ret) + return ret; if (copy_to_user(buf, &val, 4)) return -EFAULT; @@ -82,9 +141,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf, if (copy_from_user(&val, buf, 2)) return -EFAULT; - vfio_iowrite16(val, io + off); + ret = vfio_pci_iowrite16(vdev, test_mem, + val, io + off); + if (ret) + return ret; } else { - val = vfio_ioread16(io + off); + ret = vfio_pci_ioread16(vdev, test_mem, + &val, io + off); + if (ret) + return ret; if (copy_to_user(buf, &val, 2)) return -EFAULT; @@ -98,9 +163,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf, if (copy_from_user(&val, buf, 1)) return -EFAULT; - vfio_iowrite8(val, io + off); + ret = vfio_pci_iowrite8(vdev, test_mem, + val, io + off); + if (ret) + return ret; } else { - val = vfio_ioread8(io + off); + ret = vfio_pci_ioread8(vdev, test_mem, + &val, io + off); + if (ret) + return ret; if (copy_to_user(buf, &val, 1)) return -EFAULT; @@ -178,14 +249,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, count = min(count, (size_t)(end - pos)); - if (res->flags & IORESOURCE_MEM) { - down_read(&vdev->memory_lock); - if (!__vfio_pci_memory_enabled(vdev)) { - up_read(&vdev->memory_lock); - return -EIO; - } - } - if (bar == PCI_ROM_RESOURCE) { /* * The ROM can fill less space than the BAR, so we start the @@ -213,7 +276,8 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, x_end = vdev->msix_offset + vdev->msix_size; } - done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite); + done = do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos, + count, x_start, x_end, iswrite); if (done >= 0) *ppos += done; @@ -221,9 +285,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, if (bar == PCI_ROM_RESOURCE) pci_unmap_rom(pdev, io); out: - if (res->flags & IORESOURCE_MEM) - up_read(&vdev->memory_lock); - return done; } @@ -278,7 +339,12 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf, return ret; } - done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite); + /* + * VGA MMIO is a legacy, non-BAR resource that hopefully allows + * probing, so we don't currently worry about access in relation + * to the memory enable bit in the command register. 
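do_io_rw() splits every request into naturally aligned transfers, which is why its body is a 4/2/1-byte ladder. The per-iteration size selection, reduced to a standalone helper (hypothetical name, not part of the driver):

#include <stddef.h>

/* Widest naturally aligned access that fits in the remaining count. */
static size_t pick_access_size(size_t off, size_t count)
{
	if (count >= 4 && !(off % 4))
		return 4;	/* aligned dword */
	if (count >= 2 && !(off % 2))
		return 2;	/* aligned word */
	return 1;		/* unaligned tail, byte at a time */
}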
+ */ + done = do_io_rw(vdev, false, iomem, buf, off, count, 0, 0, iswrite); vga_put(vdev->pdev, rsrc); @@ -296,17 +362,21 @@ static int vfio_pci_ioeventfd_handler(void *opaque, void *unused) switch (ioeventfd->count) { case 1: - vfio_iowrite8(ioeventfd->data, ioeventfd->addr); + vfio_pci_iowrite8(ioeventfd->vdev, ioeventfd->test_mem, + ioeventfd->data, ioeventfd->addr); break; case 2: - vfio_iowrite16(ioeventfd->data, ioeventfd->addr); + vfio_pci_iowrite16(ioeventfd->vdev, ioeventfd->test_mem, + ioeventfd->data, ioeventfd->addr); break; case 4: - vfio_iowrite32(ioeventfd->data, ioeventfd->addr); + vfio_pci_iowrite32(ioeventfd->vdev, ioeventfd->test_mem, + ioeventfd->data, ioeventfd->addr); break; #ifdef iowrite64 case 8: - vfio_iowrite64(ioeventfd->data, ioeventfd->addr); + vfio_pci_iowrite64(ioeventfd->vdev, ioeventfd->test_mem, + ioeventfd->data, ioeventfd->addr); break; #endif } @@ -378,11 +448,13 @@ long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset, goto out_unlock; } + ioeventfd->vdev = vdev; ioeventfd->addr = vdev->barmap[bar] + pos; ioeventfd->data = data; ioeventfd->pos = pos; ioeventfd->bar = bar; ioeventfd->count = count; + ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM; ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler, NULL, NULL, &ioeventfd->virqfd, fd); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 6990fc711a80b4c5ff75ae918740d50307550410..5fbf0c1f7433808b4414d91331c1c3f561d0b3ff 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -1424,13 +1424,16 @@ static int vfio_bus_type(struct device *dev, void *data) static int vfio_iommu_replay(struct vfio_iommu *iommu, struct vfio_domain *domain) { - struct vfio_domain *d; + struct vfio_domain *d = NULL; struct rb_node *n; unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; int ret; /* Arbitrarily pick the first domain in the list for lookups */ - d = list_first_entry(&iommu->domain_list, struct vfio_domain, next); + if (!list_empty(&iommu->domain_list)) + d = list_first_entry(&iommu->domain_list, + struct vfio_domain, next); + n = rb_first(&iommu->dma_list); for (; n; n = rb_next(n)) { @@ -1448,6 +1451,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, phys_addr_t p; dma_addr_t i; + if (WARN_ON(!d)) { /* mapped w/o a domain?! 
*/ + ret = -EINVAL; + goto unwind; + } + phys = iommu_iova_to_phys(d->domain, iova); if (WARN_ON(!phys)) { @@ -1477,7 +1485,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; - return ret; + goto unwind; } phys = pfn << PAGE_SHIFT; @@ -1486,14 +1494,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, ret = iommu_map(domain->domain, iova, phys, size, dma->prot | domain->prot); - if (ret) - return ret; + if (ret) { + if (!dma->iommu_mapped) + vfio_unpin_pages_remote(dma, iova, + phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, + true); + goto unwind; + } iova += size; } + } + + /* All dmas are now mapped, defer to second tree walk for unwind */ + for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma->iommu_mapped = true; } + return 0; + +unwind: + for (; n; n = rb_prev(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma_addr_t iova; + + if (dma->iommu_mapped) { + iommu_unmap(domain->domain, dma->iova, dma->size); + continue; + } + + iova = dma->iova; + while (iova < dma->iova + dma->size) { + phys_addr_t phys, p; + size_t size; + dma_addr_t i; + + phys = iommu_iova_to_phys(domain->domain, iova); + if (!phys) { + iova += PAGE_SIZE; + continue; + } + + size = PAGE_SIZE; + p = phys + size; + i = iova + size; + while (i < dma->iova + dma->size && + p == iommu_iova_to_phys(domain->domain, i)) { + size += PAGE_SIZE; + p += PAGE_SIZE; + i += PAGE_SIZE; + } + + iommu_unmap(domain->domain, iova, size); + vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, true); + } + } + + return ret; } /* @@ -2378,7 +2439,7 @@ static void *vfio_iommu_type1_open(unsigned long arg) break; case VFIO_TYPE1_NESTING_IOMMU: iommu->nesting = true; - /* fall through */ + fallthrough; case VFIO_TYPE1v2_IOMMU: iommu->v2 = true; break; diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c index 1f0ca6e44410ac2ec8c47826b7f90eb8ea56694d..34aec4ba331ecd27a83253bb687ea6bae824fa02 100644 --- a/drivers/vhost/iotlb.c +++ b/drivers/vhost/iotlb.c @@ -159,8 +159,8 @@ vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last) EXPORT_SYMBOL_GPL(vhost_iotlb_itree_first); /** - * vhost_iotlb_itree_first - return the next overlapped range - * @iotlb: the IOTLB + * vhost_iotlb_itree_next - return the next overlapped range + * @map: the starting map node * @start: start of IOVA range * @end: end of IOVA range */ diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 5857d4eec9d73f69c5a934e9cff635123773acfd..b45519ca66a7e9a0d0ff5fe9ead216db6297fded 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -2537,7 +2537,7 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { r = vhost_update_used_flags(vq); if (r) - vq_err(vq, "Failed to enable notification at %p: %d\n", + vq_err(vq, "Failed to disable notification at %p: %d\n", &vq->used->flags, r); } } diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c index ddc7f5f0401fdc59fb7d7ac9df280641d710b035..8ec19425671fb9d7ae941030e86b6ea3d036df53 100644 --- a/drivers/video/backlight/adp8860_bl.c +++ b/drivers/video/backlight/adp8860_bl.c @@ -681,7 +681,7 @@ static int adp8860_probe(struct i2c_client *client, switch (ADP8860_MANID(reg_val)) { case ADP8863_MANUFID: data->gdwn_dis = !!pdata->gdwn_dis; - /* fall through */ + fallthrough; case ADP8860_MANUFID: 
data->en_ambl_sens = !!pdata->en_ambl_sens; break; diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c index a88dcb63eeb4582502217ce2477b53d1f121129a..2df56bd303d25d125c6a7813ed0bf2787abc1ec9 100644 --- a/drivers/video/fbdev/controlfb.c +++ b/drivers/video/fbdev/controlfb.c @@ -49,6 +49,8 @@ #include #ifdef CONFIG_PPC_PMAC #include <asm/prom.h> +#endif +#ifdef CONFIG_BOOTX_TEXT #include <asm/btext.h> #endif diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 8a31fc2b2258a79c969d1913a7b629daa6983fc7..66167830fefd1aa9150a4f98e57a0bbdb09208c3 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2191,6 +2191,9 @@ static void updatescrollmode(struct fbcon_display *p, } } +#define PITCH(w) (((w) + 7) >> 3) +#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */ + static int fbcon_resize(struct vc_data *vc, unsigned int width, unsigned int height, unsigned int user) { @@ -2200,6 +2203,24 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, struct fb_var_screeninfo var = info->var; int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh; + if (ops->p && ops->p->userfont && FNTSIZE(vc->vc_font.data)) { + int size; + int pitch = PITCH(vc->vc_font.width); + + /* + * If user font, ensure that a possible change to user font + * height or width will not allow a font data out-of-bounds access. + * NOTE: must use original charcount in calculation as font + * charcount can change and cannot be used to determine the + * font data allocated size. + */ + if (pitch <= 0) + return -EINVAL; + size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data)); + if (size > FNTSIZE(vc->vc_font.data)) + return -EINVAL; + } + virt_w = FBCON_SWAP(ops->rotate, width, height); virt_h = FBCON_SWAP(ops->rotate, height, width); virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width, @@ -2652,7 +2673,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, int size; int i, csum; u8 *new_data, *data = font->data; - int pitch = (font->width+7) >> 3; + int pitch = PITCH(font->width); /* Is there a reason why fbconsole couldn't handle any charcount >256? 
* If not this check should be changed to charcount < 256 */ @@ -2668,7 +2689,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, if (fbcon_invalid_charcount(info, charcount)) return -EINVAL; - size = h * pitch * charcount; + size = CALC_FONTSZ(h, pitch, charcount); new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER); diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 65491ae74808d75ba852ce90efb0b857709c85b5..e57c00824965ca6656b456ab733b0a5ef7ee5c93 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -453,7 +453,7 @@ static int efifb_probe(struct platform_device *dev) info->apertures->ranges[0].base = efifb_fix.smem_start; info->apertures->ranges[0].size = size_remap; - if (efi_enabled(EFI_BOOT) && + if (efi_enabled(EFI_MEMMAP) && !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) { if ((efifb_fix.smem_start + efifb_fix.smem_len) > (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) { diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index e4c3c8b65da44f9340b0d06394953657f591638f..02411d89cb462e2f98b0960f480f3fafb7abae91 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -648,13 +648,13 @@ static int synthvid_connect_vsp(struct hv_device *hdev) ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10); if (!ret) break; - /* Fallthrough */ + fallthrough; case VERSION_WIN8: case VERSION_WIN8_1: ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8); if (!ret) break; - /* Fallthrough */ + fallthrough; case VERSION_WS2008: case VERSION_WIN7: ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7); diff --git a/drivers/video/fbdev/mmp/fb/mmpfb.c b/drivers/video/fbdev/mmp/fb/mmpfb.c index 01c75c031cb6a523751267af45cdc22acbc78e26..39ebbe026ddf5cc34e1bbf31f5b67d66b971fa85 100644 --- a/drivers/video/fbdev/mmp/fb/mmpfb.c +++ b/drivers/video/fbdev/mmp/fb/mmpfb.c @@ -90,8 +90,6 @@ static int var_to_pixfmt(struct fb_var_screeninfo *var) else return PIXFMT_BGR888UNPACK; } - - /* fall through */ } return -EINVAL; diff --git a/drivers/video/fbdev/nvidia/nv_hw.c b/drivers/video/fbdev/nvidia/nv_hw.c index 8335da4ca30e28dbcd5fa7fb96bdbd91db26de04..9b0a324bb1b481573727476660a45a2a8eeb04a9 100644 --- a/drivers/video/fbdev/nvidia/nv_hw.c +++ b/drivers/video/fbdev/nvidia/nv_hw.c @@ -896,7 +896,7 @@ void NVCalcStateExt(struct nvidia_par *par, if (!par->FlatPanel) state->control = NV_RD32(par->PRAMDAC0, 0x0580) & 0xeffffeff; - /* fallthrough */ + fallthrough; case NV_ARCH_10: case NV_ARCH_20: case NV_ARCH_30: diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c index eedfbd3572a8faa41db5a80c2704e7fba5174ff1..47e6a1d0d2293ef1f5a4d7ec0a064153bfcbe9f0 100644 --- a/drivers/video/fbdev/pxa168fb.c +++ b/drivers/video/fbdev/pxa168fb.c @@ -60,8 +60,6 @@ static int determine_best_pix_fmt(struct fb_var_screeninfo *var) else return PIX_FMT_BGR1555; } - - /* fall through */ } /* @@ -87,8 +85,6 @@ static int determine_best_pix_fmt(struct fb_var_screeninfo *var) else return PIX_FMT_BGR888UNPACK; } - - /* fall through */ } return -EINVAL; diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c index 9b3493846f4d912c704eeaaed70248e853702d36..ce55b9d2e862bbfa8e4bc9a80d1806f9cbb23ccc 100644 --- a/drivers/video/fbdev/riva/fbdev.c +++ b/drivers/video/fbdev/riva/fbdev.c @@ -1093,7 +1093,7 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) break; case 9 ... 
15: var->green.length = 5; - /* fall through */ + fallthrough; case 16: var->bits_per_pixel = 16; /* The Riva128 supports RGB555 only */ diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c index ac140962b1bfba035238750f96404a1cb5a2b414..03c736f6f3d086d2febcdd900b47d1a9171f185e 100644 --- a/drivers/video/fbdev/sis/sis_main.c +++ b/drivers/video/fbdev/sis/sis_main.c @@ -1739,7 +1739,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); - /* fall through */ + fallthrough; case SISFB_GET_INFO: /* For communication with X driver */ ivideo->sisfb_infoblock.sisfb_id = SISFB_ID; ivideo->sisfb_infoblock.sisfb_version = VER_MAJOR; @@ -1793,7 +1793,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); - /* fall through */ + fallthrough; case SISFB_GET_VBRSTATUS: if(sisfb_CheckVBRetrace(ivideo)) return put_user((u32)1, argp); @@ -1804,7 +1804,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); - /* fall through */ + fallthrough; case SISFB_GET_AUTOMAXIMIZE: if(ivideo->sisfb_max) return put_user((u32)1, argp); @@ -1815,7 +1815,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); - /* fall through */ + fallthrough; case SISFB_SET_AUTOMAXIMIZE: if(get_user(gpu32, argp)) return -EFAULT; diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index de953ddb631243ad0ebceeef67e53b6266b12fb0..265865610edc695cad8e3d6196bb81a494a93920 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -999,7 +999,7 @@ stifb_blank(int blank_mode, struct fb_info *info) case S9000_ID_HCRX: HYPER_ENABLE_DISABLE_DISPLAY(fb, enable); break; - case S9000_ID_A1659A: /* fall through */ + case S9000_ID_A1659A: case S9000_ID_TIMBER: case CRX24_OVERLAY_PLANES: default: @@ -1157,7 +1157,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) dev_name); goto out_err0; } - /* fall through */ + fallthrough; case S9000_ID_ARTIST: case S9000_ID_HCRX: case S9000_ID_TIMBER: diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c index 52f273af6caeec5122db95dfbfdf135c1d7203fa..1e8a38a7967d890a9e44fb245e2c6ce11f7b0f5c 100644 --- a/drivers/video/fbdev/vga16fb.c +++ b/drivers/video/fbdev/vga16fb.c @@ -1121,7 +1121,7 @@ static void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *i char oldop = setop(0); char oldsr = setsr(0); char oldmask = selectmask(); - const char *cdat = image->data; + const unsigned char *cdat = image->data; u32 dx = image->dx; char __iomem *where; int y; diff --git a/drivers/video/fbdev/via/lcd.c b/drivers/video/fbdev/via/lcd.c index 3fea01db58d630297c1bb7e72bdc9903c79725ff..4a869402d120d37b1d2476f5e06d08baee1dcd96 100644 --- a/drivers/video/fbdev/via/lcd.c +++ b/drivers/video/fbdev/via/lcd.c @@ -744,7 +744,7 @@ static void set_lcd_output_path(int set_iga, int output_interface) viaparinfo->chip_info->gfx_chip_name)) viafb_write_reg_mask(CR97, VIACR, 0x84, BIT7 + BIT2 + BIT1 + BIT0); - /* fall through */ + fallthrough; case INTERFACE_DVP0: case INTERFACE_DVP1: case INTERFACE_DFP_HIGH: 
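Most hunks in this stretch replace a /* fall through */ comment with the fallthrough pseudo-keyword, introduced in v5.4. It is defined roughly as below (see include/linux/compiler_attributes.h), so -Wimplicit-fallthrough is satisfied by a statement the compiler can see instead of by comment parsing:

#if defined(__has_attribute) && __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif

/* Usage sketch: the annotation documents that case 1 deliberately
 * continues into case 2. */
static int classify(int n)
{
	int score = 0;

	switch (n) {
	case 1:
		score += 10;
		fallthrough;
	case 2:
		score += 1;
		break;
	default:
		break;
	}
	return score;
}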
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c index 9673eb12dacd06d5577e312f3dc8f46aa5188ddc..f22ebe89fe137a0733cd6afda15c11238c4a1277 100644 --- a/drivers/watchdog/sc1200wdt.c +++ b/drivers/watchdog/sc1200wdt.c @@ -234,7 +234,7 @@ static long sc1200wdt_ioctl(struct file *file, unsigned int cmd, return -EINVAL; timeout = new_timeout; sc1200wdt_write_data(WDTO, timeout); - /* fall through - and return the new timeout */ + fallthrough; /* and return the new timeout */ case WDIOC_GETTIMEOUT: return put_user(timeout * 60, p); diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c index 184a06a74f83a225c3289a53f2d98197523718f4..c00627825de88ac39aed5ccf87b424c086c1d88e 100644 --- a/drivers/watchdog/wdrtas.c +++ b/drivers/watchdog/wdrtas.c @@ -332,7 +332,7 @@ static long wdrtas_ioctl(struct file *file, unsigned int cmd, wdrtas_interval = i; else wdrtas_interval = wdrtas_get_interval(i); - /* fallthrough */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(wdrtas_interval, argp); diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index ea6c1e7e3e421a3257c2754ee635705e5e5926b9..41645fe6ad48a2902e5dbb1827b859e0e3e39ecc 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -325,4 +325,14 @@ config XEN_HAVE_VPMU config XEN_FRONT_PGDIR_SHBUF tristate +config XEN_UNPOPULATED_ALLOC + bool "Use unpopulated memory ranges for guest mappings" + depends on X86 && ZONE_DEVICE + default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0 + help + Use unpopulated memory ranges in order to create mappings for guest + memory regions, including grant maps and foreign pages. This avoids + having to balloon out RAM regions in order to obtain physical memory + space to create such mappings. + endmenu diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index c25c9a699b483731d453d8ff6efcf75d4ade4a31..babdca808861da9e5d54c58c4f4523c885159b29 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -41,3 +41,4 @@ xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += gntdev-dmabuf.o xen-gntalloc-y := gntalloc.o xen-privcmd-y := privcmd.o privcmd-buf.o obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o +obj-$(CONFIG_XEN_UNPOPULATED_ALLOC) += unpopulated-alloc.o diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 37ffccda8bb8791818641f15f9d3639b64781731..51427c752b37b9aa748d1c3194bdd6da93f881f1 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -653,7 +653,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages) } EXPORT_SYMBOL(free_xenballooned_pages); -#ifdef CONFIG_XEN_PV +#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC) static void __init balloon_add_region(unsigned long start_pfn, unsigned long pages) { @@ -707,7 +707,7 @@ static int __init balloon_init(void) register_sysctl_table(xen_root); #endif -#ifdef CONFIG_XEN_PV +#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC) { int i; diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 140c7bf33a989948e946f486edd37b560f0ede67..90b8f56fbadb1a3b1fea483cc9f8e9c1847942c7 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -156,7 +156,7 @@ int get_evtchn_to_irq(evtchn_port_t evtchn) /* Get info for IRQ */ struct irq_info *info_for_irq(unsigned irq) { - return irq_get_handler_data(irq); + return irq_get_chip_data(irq); } /* Constructors for packed IRQ information. 
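These events_base.c hunks move Xen's per-IRQ bookkeeping from the descriptor's handler-data slot to its chip-data slot; both are opaque void * cookies paired with a matching getter. The store/lookup/teardown pattern in miniature (the my_* names are illustrative):

#include <linux/irq.h>
#include <linux/slab.h>

struct my_irq_info {
	int type;
	int refcnt;
};

static void my_irq_attach(unsigned int irq, struct my_irq_info *info)
{
	irq_set_chip_data(irq, info);
}

static void my_irq_detach(unsigned int irq)
{
	struct my_irq_info *info = irq_get_chip_data(irq);

	if (WARN_ON(!info))	/* same guard as xen_free_irq() */
		return;
	irq_set_chip_data(irq, NULL);
	kfree(info);
}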
*/ @@ -377,7 +377,7 @@ static void xen_irq_init(unsigned irq) info->type = IRQT_UNBOUND; info->refcnt = -1; - irq_set_handler_data(irq, info); + irq_set_chip_data(irq, info); list_add_tail(&info->list, &xen_irq_list_head); } @@ -426,14 +426,14 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) static void xen_free_irq(unsigned irq) { - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = irq_get_chip_data(irq); if (WARN_ON(!info)) return; list_del(&info->list); - irq_set_handler_data(irq, NULL); + irq_set_chip_data(irq, NULL); WARN_ON(info->refcnt > 0); @@ -603,7 +603,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi); static void __unbind_from_irq(unsigned int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = irq_get_chip_data(irq); if (info->refcnt > 0) { info->refcnt--; @@ -1108,7 +1108,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, void unbind_from_irqhandler(unsigned int irq, void *dev_id) { - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = irq_get_chip_data(irq); if (WARN_ON(!info)) return; @@ -1142,7 +1142,7 @@ int evtchn_make_refcounted(evtchn_port_t evtchn) if (irq == -1) return -ENOENT; - info = irq_get_handler_data(irq); + info = irq_get_chip_data(irq); if (!info) return -ENOENT; @@ -1170,7 +1170,7 @@ int evtchn_get(evtchn_port_t evtchn) if (irq == -1) goto done; - info = irq_get_handler_data(irq); + info = irq_get_chip_data(irq); if (!info) goto done; diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 8d06bf1cc3479c5a205b0d2f05782416f0454f3a..523dcdf39cc944d45b8275845bfe2335aae72302 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -801,7 +801,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages) { int ret; - ret = alloc_xenballooned_pages(nr_pages, pages); + ret = xen_alloc_unpopulated_pages(nr_pages, pages); if (ret < 0) return ret; @@ -836,7 +836,7 @@ EXPORT_SYMBOL_GPL(gnttab_pages_clear_private); void gnttab_free_pages(int nr_pages, struct page **pages) { gnttab_pages_clear_private(nr_pages, pages); - free_xenballooned_pages(nr_pages, pages); + xen_free_unpopulated_pages(nr_pages, pages); } EXPORT_SYMBOL_GPL(gnttab_free_pages); diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 63abe6c3642b579b528ca21bff0a92a3af89a0c3..b0c73c58f9874a34f81fcd4477cc752cdfdbf381 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -424,7 +424,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs) if (pages == NULL) return -ENOMEM; - rc = alloc_xenballooned_pages(numpgs, pages); + rc = xen_alloc_unpopulated_pages(numpgs, pages); if (rc != 0) { pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__, numpgs, rc); @@ -895,7 +895,7 @@ static void privcmd_close(struct vm_area_struct *vma) rc = xen_unmap_domain_gfn_range(vma, numgfns, pages); if (rc == 0) - free_xenballooned_pages(numpgs, pages); + xen_free_unpopulated_pages(numpgs, pages); else pr_crit("unable to unmap MFN range: leaking %d pages. 
rc=%d\n", numpgs, rc); diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index b43b5595e988024832bde33724f34147661857cf..72d725a0ab5ce1f4afcac3684c92a55ebaa5f726 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -1263,7 +1263,7 @@ static void pvcalls_front_changed(struct xenbus_device *dev, if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state */ - /* fall through */ + fallthrough; case XenbusStateClosing: xenbus_frontend_closed(dev); break; diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c new file mode 100644 index 0000000000000000000000000000000000000000..3b98dc921426898c3dd884bafc65088d215e59de --- /dev/null +++ b/drivers/xen/unpopulated-alloc.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +static DEFINE_MUTEX(list_lock); +static LIST_HEAD(page_list); +static unsigned int list_count; + +static int fill_list(unsigned int nr_pages) +{ + struct dev_pagemap *pgmap; + void *vaddr; + unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION); + int ret; + + pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL); + if (!pgmap) + return -ENOMEM; + + pgmap->type = MEMORY_DEVICE_GENERIC; + pgmap->res.name = "Xen scratch"; + pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + + ret = allocate_resource(&iomem_resource, &pgmap->res, + alloc_pages * PAGE_SIZE, 0, -1, + PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); + if (ret < 0) { + pr_err("Cannot allocate new IOMEM resource\n"); + kfree(pgmap); + return ret; + } + +#ifdef CONFIG_XEN_HAVE_PVMMU + /* + * memremap will build page tables for the new memory so + * the p2m must contain invalid entries so the correct + * non-present PTEs will be written. + * + * If a failure occurs, the original (identity) p2m entries + * are not restored since this region is now known not to + * conflict with any devices. 
+ */ + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + xen_pfn_t pfn = PFN_DOWN(pgmap->res.start); + + for (i = 0; i < alloc_pages; i++) { + if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { + pr_warn("set_phys_to_machine() failed, no memory added\n"); + release_resource(&pgmap->res); + kfree(pgmap); + return -ENOMEM; + } + } + } +#endif + + vaddr = memremap_pages(pgmap, NUMA_NO_NODE); + if (IS_ERR(vaddr)) { + pr_err("Cannot remap memory range\n"); + release_resource(&pgmap->res); + kfree(pgmap); + return PTR_ERR(vaddr); + } + + for (i = 0; i < alloc_pages; i++) { + struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i); + + BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i)); + list_add(&pg->lru, &page_list); + list_count++; + } + + return 0; +} + +/** + * xen_alloc_unpopulated_pages - alloc unpopulated pages + * @nr_pages: Number of pages + * @pages: pages returned + * @return 0 on success, error otherwise + */ +int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages) +{ + unsigned int i; + int ret = 0; + + mutex_lock(&list_lock); + if (list_count < nr_pages) { + ret = fill_list(nr_pages - list_count); + if (ret) + goto out; + } + + for (i = 0; i < nr_pages; i++) { + struct page *pg = list_first_entry_or_null(&page_list, + struct page, + lru); + + BUG_ON(!pg); + list_del(&pg->lru); + list_count--; + pages[i] = pg; + +#ifdef CONFIG_XEN_HAVE_PVMMU + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + ret = xen_alloc_p2m_entry(page_to_pfn(pg)); + if (ret < 0) { + unsigned int j; + + for (j = 0; j <= i; j++) { + list_add(&pages[j]->lru, &page_list); + list_count++; + } + goto out; + } + } +#endif + } + +out: + mutex_unlock(&list_lock); + return ret; +} +EXPORT_SYMBOL(xen_alloc_unpopulated_pages); + +/** + * xen_free_unpopulated_pages - return unpopulated pages + * @nr_pages: Number of pages + * @pages: pages to return + */ +void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages) +{ + unsigned int i; + + mutex_lock(&list_lock); + for (i = 0; i < nr_pages; i++) { + list_add(&pages[i]->lru, &page_list); + list_count++; + } + mutex_unlock(&list_lock); +} +EXPORT_SYMBOL(xen_free_unpopulated_pages); + +#ifdef CONFIG_XEN_PV +static int __init init(void) +{ + unsigned int i; + + if (!xen_domain()) + return -ENODEV; + + if (!xen_pv_domain()) + return 0; + + /* + * Initialize with pages from the extra memory regions (see + * arch/x86/xen/setup.c). 
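xen_alloc_unpopulated_pages() above is a fill-on-demand pool: a mutex-guarded free list plus a counter, topped up in section-sized batches by fill_list(). Its core shape, stripped of the Xen and p2m specifics (an illustrative sketch, not the kernel code; the sketch fails where the real code refills):

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(pool_lock);
static LIST_HEAD(pool);
static unsigned int pool_count;

static int pool_take(unsigned int nr, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&pool_lock);
	if (pool_count < nr) {
		ret = -ENOMEM;	/* the real code calls fill_list() here */
		goto out;
	}
	for (i = 0; i < nr; i++) {
		struct page *pg = list_first_entry(&pool, struct page, lru);

		list_del(&pg->lru);
		pool_count--;
		pages[i] = pg;
	}
out:
	mutex_unlock(&pool_lock);
	return ret;
}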
+ */ + for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + unsigned int j; + + for (j = 0; j < xen_extra_mem[i].n_pfns; j++) { + struct page *pg = + pfn_to_page(xen_extra_mem[i].start_pfn + j); + + list_add(&pg->lru, &page_list); + list_count++; + } + } + + return 0; +} +subsys_initcall(init); +#endif diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c index 7457213b89156a72c6ca822d022f799097e5d42b..f914b72557efb0379cb208c90a715c2fe3006982 100644 --- a/drivers/xen/xen-acpi-memhotplug.c +++ b/drivers/xen/xen-acpi-memhotplug.c @@ -229,7 +229,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) case ACPI_NOTIFY_BUS_CHECK: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "\nReceived BUS CHECK notification for device\n")); - /* Fall Through */ + fallthrough; case ACPI_NOTIFY_DEVICE_CHECK: if (event == ACPI_NOTIFY_DEVICE_CHECK) ACPI_DEBUG_PRINT((ACPI_DB_INFO, diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index f2115587855f7d629910f2ec57726227b9aa0844..b500466a6c371f125dc12b2f2bccd8a699c06778 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c @@ -545,7 +545,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev, xenbus_switch_state(xdev, XenbusStateClosed); if (xenbus_dev_is_online(xdev)) break; - /* fall through - if not online */ + fallthrough; /* if not online */ case XenbusStateUnknown: dev_dbg(&xdev->dev, "frontend is gone! unregister device\n"); device_unregister(&xdev->dev); diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 75c0a2e9a6db55b72d284af345548239868205ad..1e8cfd80a4e6b45c066811156b82a9f63ea4c725 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -1185,7 +1185,7 @@ static void scsiback_frontend_changed(struct xenbus_device *dev, xenbus_switch_state(dev, XenbusStateClosed); if (xenbus_dev_is_online(dev)) break; - /* fall through - if not online */ + fallthrough; /* if not online */ case XenbusStateUnknown: device_unregister(&dev->dev); break; diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 786fbb7d8be063f82d65a1fcaef8a627ea10c0b6..2690318ad50f487ad525d1653ee570f0d3772384 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -379,8 +379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, int i, j; for (i = 0; i < nr_pages; i++) { - err = gnttab_grant_foreign_access(dev->otherend_id, - virt_to_gfn(vaddr), 0); + unsigned long gfn; + + if (is_vmalloc_addr(vaddr)) + gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr)); + else + gfn = virt_to_gfn(vaddr); + + err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0); if (err < 0) { xenbus_dev_fatal(dev, err, "granting access to ring page"); @@ -615,7 +621,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev, bool leaked = false; unsigned int nr_pages = XENBUS_PAGES(nr_grefs); - err = alloc_xenballooned_pages(nr_pages, node->hvm.pages); + err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages); if (err) goto out_err; @@ -656,7 +662,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev, addr, nr_pages); out_free_ballooned_pages: if (!leaked) - free_xenballooned_pages(nr_pages, node->hvm.pages); + xen_free_unpopulated_pages(nr_pages, node->hvm.pages); out_err: return err; } @@ -852,7 +858,7 @@ static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr) info.addrs); if (!rv) { vunmap(vaddr); - free_xenballooned_pages(nr_pages, node->hvm.pages); 
+ xen_free_unpopulated_pages(nr_pages, node->hvm.pages); } else WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages); diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index 15379089853babf0d833271362bb663f1d2e0be6..480944606a3c9910ea8df4a13d3905a51ac11dff 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c @@ -401,12 +401,12 @@ static void xenbus_reset_frontend(char *fe, char *be, int be_state) case XenbusStateConnected: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing); xenbus_reset_wait_for_backend(be, XenbusStateClosing); - /* fall through */ + fallthrough; case XenbusStateClosing: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed); xenbus_reset_wait_for_backend(be, XenbusStateClosed); - /* fall through */ + fallthrough; case XenbusStateClosed: xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising); diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c index 7b1077f0abcb0839d89a299de2a9d9d60760943a..34742c6e189e386a5120ad67667faae3031ab25c 100644 --- a/drivers/xen/xlate_mmu.c +++ b/drivers/xen/xlate_mmu.c @@ -232,7 +232,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, kfree(pages); return -ENOMEM; } - rc = alloc_xenballooned_pages(nr_pages, pages); + rc = xen_alloc_unpopulated_pages(nr_pages, pages); if (rc) { pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__, nr_pages, rc); @@ -249,7 +249,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, if (!vaddr) { pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__, nr_pages, rc); - free_xenballooned_pages(nr_pages, pages); + xen_free_unpopulated_pages(nr_pages, pages); kfree(pages); kfree(pfns); return -ENOMEM; diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 92cd1d80218d70acc842dc5942581f069e87dbdc..3576123d82990e61dca4b3bbc8dabb1ebd0cea6a 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -213,7 +213,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) break; default: WARN_ONCE(1, "unknown lock status code: %d\n", status); - /* fall through */ + fallthrough; case P9_LOCK_ERROR: case P9_LOCK_GRACE: res = -ENOLCK; diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c index 30d526fecc3f40fbd69c0b216e2f89a6fe44e3e2..05e963402e25f05f62649d9939bfe8f63ea6dcc6 100644 --- a/fs/adfs/dir_f.c +++ b/fs/adfs/dir_f.c @@ -18,11 +18,11 @@ static inline unsigned int adfs_readval(unsigned char *p, int len) switch (len) { case 4: val |= p[3] << 24; - /* fall through */ + fallthrough; case 3: val |= p[2] << 16; - /* fall through */ + fallthrough; case 2: val |= p[1] << 8; - /* fall through */ + fallthrough; default: val |= p[0]; } return val; @@ -32,11 +32,11 @@ static inline void adfs_writeval(unsigned char *p, int len, unsigned int val) { switch (len) { case 4: p[3] = val >> 24; - /* fall through */ + fallthrough; case 3: p[2] = val >> 16; - /* fall through */ + fallthrough; case 2: p[1] = val >> 8; - /* fall through */ + fallthrough; default: p[0] = val; } } diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index f708c45d5f664d84475f6a42214d0732c43325b9..29f11e10a7c7d213623063b12b5e98e5bd01a85f 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -420,24 +420,51 @@ affs_mode_to_prot(struct inode *inode) u32 prot = AFFS_I(inode)->i_protect; umode_t mode = inode->i_mode; + /* + * First, clear all RWED bits for owner, group, other. + * Then, recalculate them afresh. 
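The classic AmigaOS owner bits being recalculated here are inhibit flags (set means the permission is denied), while the multi-user group/other extensions are positive grants, so the two halves of affs_mode_to_prot() test the mode in opposite senses. The inverted owner half as a standalone miniature, with invented mask values:

/* Invented values for illustration; the driver uses the FIBF_* masks. */
#define P_NOEXEC	0x1
#define P_NOWRITE	0x4
#define P_NOREAD	0x8

static unsigned int owner_mode_to_prot(unsigned short mode)
{
	unsigned int prot = 0;

	if (!(mode & 0100))	/* no owner execute -> inhibit execute */
		prot |= P_NOEXEC;
	if (!(mode & 0400))	/* no owner read -> inhibit read */
		prot |= P_NOREAD;
	if (!(mode & 0200))	/* no owner write -> inhibit write */
		prot |= P_NOWRITE;

	return prot;		/* e.g. mode 0644 yields P_NOEXEC only */
}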
+ * + * We'll always clear the delete-inhibit bit for the owner, as that is + * the classic single-user mode AmigaOS protection bit and we need to + * stay compatible with all scenarios. + * + * Since multi-user AmigaOS is an extension, we'll only set the + * delete-allow bit if any of the other bits in the same user class + * (group/other) are used. + */ + prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD + | FIBF_NOWRITE | FIBF_NODELETE + | FIBF_GRP_EXECUTE | FIBF_GRP_READ + | FIBF_GRP_WRITE | FIBF_GRP_DELETE + | FIBF_OTR_EXECUTE | FIBF_OTR_READ + | FIBF_OTR_WRITE | FIBF_OTR_DELETE); + + /* Classic single-user AmigaOS flags. These are inverted. */ if (!(mode & 0100)) prot |= FIBF_NOEXECUTE; if (!(mode & 0400)) prot |= FIBF_NOREAD; if (!(mode & 0200)) prot |= FIBF_NOWRITE; + + /* Multi-user extended flags. Not inverted. */ if (mode & 0010) prot |= FIBF_GRP_EXECUTE; if (mode & 0040) prot |= FIBF_GRP_READ; if (mode & 0020) prot |= FIBF_GRP_WRITE; + if (mode & 0070) + prot |= FIBF_GRP_DELETE; + if (mode & 0001) prot |= FIBF_OTR_EXECUTE; if (mode & 0004) prot |= FIBF_OTR_READ; if (mode & 0002) prot |= FIBF_OTR_WRITE; + if (mode & 0007) + prot |= FIBF_OTR_DELETE; AFFS_I(inode)->i_protect = prot; } diff --git a/fs/affs/file.c b/fs/affs/file.c index a26a0f96c1197a10bd5c4ac33cc8650100fe3c2f..d91b0133d95da5e225e01f3a6e9ef76b89c575d0 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -429,6 +429,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping, return ret; } +static int affs_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned int len, unsigned int copied, + struct page *page, void *fsdata) +{ + struct inode *inode = mapping->host; + int ret; + + ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); + + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + + return ret; +} + static sector_t _affs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,affs_get_block); @@ -438,7 +456,7 @@ const struct address_space_operations affs_aops = { .readpage = affs_readpage, .writepage = affs_writepage, .write_begin = affs_write_begin, - .write_end = generic_write_end, + .write_end = affs_write_end, .direct_IO = affs_direct_IO, .bmap = _affs_bmap }; @@ -795,6 +813,12 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + err_first_bh: unlock_page(page); put_page(page); diff --git a/fs/affs/inode.c b/fs/affs/inode.c index a346cf7659f19804db57d64d6702c5d2130b7230..044412110b52354f927a49603a6f164785cc8e8c 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -93,7 +93,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino) case ST_ROOT: inode->i_uid = sbi->s_uid; inode->i_gid = sbi->s_gid; - /* fall through */ + fallthrough; case ST_USERDIR: if (be32_to_cpu(tail->stype) == ST_USERDIR || affs_test_opt(sbi->s_flags, SF_SETMODE)) { diff --git a/fs/affs/super.c b/fs/affs/super.c index 47107c6712a6134fb815066816a9e32389b8dfcd..a100cd9950c8897453c4a25268ba2888be8c0ae6 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -474,7 +474,7 @@ static int affs_fill_super(struct 
super_block *sb, void *data, int silent) case MUFS_INTLFFS: case MUFS_DCFFS: affs_set_opt(sbi->s_flags, SF_MUFS); - /* fall thru */ + fallthrough; case FS_INTLFFS: case FS_DCFFS: affs_set_opt(sbi->s_flags, SF_INTL); @@ -486,7 +486,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent) break; case MUFS_OFS: affs_set_opt(sbi->s_flags, SF_MUFS); - /* fall through */ + fallthrough; case FS_OFS: affs_set_opt(sbi->s_flags, SF_OFS); sb->s_flags |= SB_NOEXEC; @@ -494,7 +494,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent) case MUFS_DCOFS: case MUFS_INTLOFS: affs_set_opt(sbi->s_flags, SF_MUFS); - /* fall through */ + fallthrough; case FS_DCOFS: case FS_INTLOFS: affs_set_opt(sbi->s_flags, SF_INTL); diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index bef413818af7393d0cd42aae8c08435df3a16f9c..a4e9e6e07e9397321f61567a047f3fafb574fa62 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -252,7 +252,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) call->unmarshall++; /* extract the FID array and its count in two steps */ - /* fall through */ + fallthrough; case 1: _debug("extract FID count"); ret = afs_extract_data(call, true); @@ -271,7 +271,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) afs_extract_to_buf(call, call->count * 3 * 4); call->unmarshall++; - /* Fall through */ + fallthrough; case 2: _debug("extract FID array"); ret = afs_extract_data(call, true); @@ -297,7 +297,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) call->unmarshall++; /* extract the callback array and its count in two steps */ - /* fall through */ + fallthrough; case 3: _debug("extract CB count"); ret = afs_extract_data(call, true); @@ -312,7 +312,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) iov_iter_discard(&call->def_iter, READ, call->count2 * 3 * 4); call->unmarshall++; - /* Fall through */ + fallthrough; case 4: _debug("extract discard %zu/%u", iov_iter_count(call->iter), call->count2 * 3 * 4); @@ -391,7 +391,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call) afs_extract_to_buf(call, 11 * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + fallthrough; case 1: _debug("extract UUID"); ret = afs_extract_data(call, false); @@ -503,7 +503,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call) afs_extract_to_buf(call, 11 * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + fallthrough; case 1: _debug("extract UUID"); ret = afs_extract_data(call, false); @@ -618,7 +618,7 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call) call->unmarshall++; /* extract the FID array and its count in two steps */ - /* Fall through */ + fallthrough; case 1: _debug("extract FID count"); ret = afs_extract_data(call, true); @@ -637,7 +637,7 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call) afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; case 2: _debug("extract FID array"); ret = afs_extract_data(call, false); diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index b79879aacc02e80ddaa7cfdcd1706d547eca9a6d..7b784af604fd9fb125c71ab41720828448647e9c 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -382,15 +382,17 @@ void afs_dynroot_depopulate(struct super_block *sb) net->dynroot_sb = NULL; mutex_unlock(&net->proc_cells_lock); - inode_lock(root->d_inode); - - /* Remove all the pins for dirs created for manually added cells */ - list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { - if 
(subdir->d_fsdata) { - subdir->d_fsdata = NULL; - dput(subdir); + if (root) { + inode_lock(root->d_inode); + + /* Remove all the pins for dirs created for manually added cells */ + list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { + if (subdir->d_fsdata) { + subdir->d_fsdata = NULL; + dput(subdir); + } } - } - inode_unlock(root->d_inode); + inode_unlock(root->d_inode); + } } diff --git a/fs/afs/file.c b/fs/afs/file.c index 6f6ed1605cfe30bc9131b898368d50917692662d..371d1488cc549db412a75045b7bfaf5b4d37204e 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -311,7 +311,7 @@ int afs_page_filler(void *data, struct page *page) case -ENOBUFS: _debug("cache said ENOBUFS"); - /* fall through */ + fallthrough; default: go_on: req = kzalloc(struct_size(req, array, 1), GFP_KERNEL); diff --git a/fs/afs/flock.c b/fs/afs/flock.c index ffb8575345ca748ecbb2e6fd1c4eef217b8c7375..cb3054c7843ea49355409eec8acb68de82ef4679 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -376,7 +376,6 @@ void afs_lock_work(struct work_struct *work) spin_unlock(&vnode->lock); return; - /* Fall through */ default: /* Looks like a lock request was withdrawn. */ spin_unlock(&vnode->lock); diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c index 24fd163c6323e5b28616ecff2d255941c50aa40f..97cab12b0a6c252380e3f317eac3aaba4878e894 100644 --- a/fs/afs/fs_operation.c +++ b/fs/afs/fs_operation.c @@ -235,6 +235,7 @@ int afs_put_operation(struct afs_operation *op) afs_end_cursor(&op->ac); afs_put_serverlist(op->net, op->server_list); afs_put_volume(op->net, op->volume, afs_volume_trace_put_put_op); + key_put(op->key); kfree(op); return ret; } diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index 5d9ef517cf8162335c592ee2c57338f344e979de..e7e98ad63a91ae8ea126a2ee646814a7ec20ab41 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -161,8 +161,8 @@ void afs_fileserver_probe_result(struct afs_call *call) } } - rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); - if (rtt_us < server->probe.rtt) { + if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && + rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; server->rtt = rtt_us; alist->preferred = index; diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index acb4d0ca264908704388880e4938f7a0594a9f47..1d95ed9dd86e601f6068ec3bf500ebcc08b9d8e9 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -320,7 +320,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) call->tmp_u = htonl(0); afs_extract_to_tmp(call); } - /* Fall through */ + fallthrough; /* extract the returned data length */ case 1: @@ -348,7 +348,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) call->bvec[0].bv_page = req->pages[req->index]; iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size); ASSERTCMP(size, <=, PAGE_SIZE); - /* Fall through */ + fallthrough; /* extract the returned data */ case 2: @@ -375,7 +375,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) /* Discard any excess data the server gave us */ afs_extract_discard(call, req->actual_len - req->len); call->unmarshall = 3; - /* Fall through */ + fallthrough; case 3: _debug("extract discard %zu/%llu", @@ -388,7 +388,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) no_more_data: call->unmarshall = 4; afs_extract_to_buf(call, (21 + 3 + 6) * 4); - /* Fall through */ + fallthrough; /* extract the metadata */ case 4: @@ -1343,7 +1343,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) case 0: call->unmarshall++; 
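These afs_deliver_* bodies are resumable parsers: call->unmarshall records the phase reached, a short read returns control to the caller, and the next invocation re-enters at the same case. The fallthrough annotations matter because buffered data often lets several phases complete in one pass. The skeleton of the pattern, with invented names and the fallthrough macro shown earlier:

struct decoder {
	int state;		/* plays the role of call->unmarshall */
};

/* Stub: pull the next field off the wire. Returns 0 when the field is
 * complete, -1 when it would block and the caller must retry later. */
static int try_extract(struct decoder *d)
{
	(void)d;
	return 0;
}

static int deliver(struct decoder *d)
{
	switch (d->state) {
	case 0:
		d->state++;
		fallthrough;		/* data may already be buffered */
	case 1:
		if (try_extract(d) < 0)
			return -1;	/* resume at case 1 next call */
		d->state++;
		fallthrough;
	case 2:
		if (try_extract(d) < 0)
			return -1;
		d->state++;
		break;
	}
	return 0;
}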
afs_extract_to_buf(call, 12 * 4); - /* Fall through */ + fallthrough; /* extract the returned status record */ case 1: @@ -1356,7 +1356,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) xdr_decode_AFSFetchVolumeStatus(&bp, &op->volstatus.vs); call->unmarshall++; afs_extract_to_tmp(call); - /* Fall through */ + fallthrough; /* extract the volume name length */ case 2: @@ -1371,7 +1371,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the volume name */ case 3: @@ -1385,7 +1385,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) _debug("volname '%s'", p); afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the offline message length */ case 4: @@ -1400,7 +1400,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the offline message */ case 5: @@ -1415,7 +1415,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the message of the day length */ case 6: @@ -1430,7 +1430,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the message of the day */ case 7: @@ -1682,7 +1682,7 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the capabilities word count */ case 1: @@ -1696,7 +1696,7 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call) call->count2 = count; afs_extract_discard(call, count * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract capabilities words */ case 2: @@ -1776,7 +1776,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the file status count and array in two steps */ case 1: @@ -1794,7 +1794,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_counts: afs_extract_to_buf(call, 21 * sizeof(__be32)); - /* Fall through */ + fallthrough; case 2: _debug("extract status array %u", call->count); @@ -1824,7 +1824,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->count = 0; call->unmarshall++; afs_extract_to_tmp(call); - /* Fall through */ + fallthrough; /* Extract the callback count and array in two steps */ case 3: @@ -1841,7 +1841,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_cbs: afs_extract_to_buf(call, 3 * sizeof(__be32)); - /* Fall through */ + fallthrough; case 4: _debug("extract CB array"); @@ -1870,7 +1870,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) afs_extract_to_buf(call, 6 * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + fallthrough; case 5: ret = afs_extract_data(call, false); @@ -1974,7 +1974,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + 
fallthrough; /* extract the returned data length */ case 1: @@ -1992,7 +1992,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) acl->size = call->count2; afs_extract_begin(call, acl->data, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the returned data */ case 2: @@ -2002,7 +2002,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) afs_extract_to_buf(call, (21 + 6) * 4); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the metadata */ case 3: diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 792ac711985eb5464d692d5946a3716c67390604..18042b7dab6a8791e3f0b9d1abb2de61b71cccc3 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -401,22 +401,24 @@ struct afs_vlserver { #define AFS_VLSERVER_FL_PROBED 0 /* The VL server has been probed */ #define AFS_VLSERVER_FL_PROBING 1 /* VL server is being probed */ #define AFS_VLSERVER_FL_IS_YFS 2 /* Server is YFS not AFS */ +#define AFS_VLSERVER_FL_RESPONDING 3 /* VL server is responding */ rwlock_t lock; /* Lock on addresses */ atomic_t usage; + unsigned int rtt; /* Server's current RTT in uS */ /* Probe state */ wait_queue_head_t probe_wq; atomic_t probe_outstanding; spinlock_t probe_lock; struct { - unsigned int rtt; /* RTT as ktime/64 */ + unsigned int rtt; /* RTT in uS */ u32 abort_code; short error; - bool have_result; - bool responded:1; - bool is_yfs:1; - bool not_yfs:1; - bool local_failure:1; + unsigned short flags; +#define AFS_VLSERVER_PROBE_RESPONDED 0x01 /* At least once response (may be abort) */ +#define AFS_VLSERVER_PROBE_IS_YFS 0x02 /* The peer appears to be YFS */ +#define AFS_VLSERVER_PROBE_NOT_YFS 0x04 /* The peer appears not to be YFS */ +#define AFS_VLSERVER_PROBE_LOCAL_FAILURE 0x08 /* A local failure prevented a probe */ } probe; u16 port; diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 5334f1bd2bca7cdca9c97648e571375a7bd1d558..1d1a8debe4723e4b0d5a69e892cd4389f07ef1c8 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c @@ -120,42 +120,42 @@ void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code) if (e->error == -ETIMEDOUT || e->error == -ETIME) return; - /* Fall through */ + fallthrough; case -ETIMEDOUT: case -ETIME: if (e->error == -ENOMEM || e->error == -ENONET) return; - /* Fall through */ + fallthrough; case -ENOMEM: case -ENONET: if (e->error == -ERFKILL) return; - /* Fall through */ + fallthrough; case -ERFKILL: if (e->error == -EADDRNOTAVAIL) return; - /* Fall through */ + fallthrough; case -EADDRNOTAVAIL: if (e->error == -ENETUNREACH) return; - /* Fall through */ + fallthrough; case -ENETUNREACH: if (e->error == -EHOSTUNREACH) return; - /* Fall through */ + fallthrough; case -EHOSTUNREACH: if (e->error == -EHOSTDOWN) return; - /* Fall through */ + fallthrough; case -EHOSTDOWN: if (e->error == -ECONNREFUSED) return; - /* Fall through */ + fallthrough; case -ECONNREFUSED: if (e->error == -ECONNRESET) return; - /* Fall through */ + fallthrough; case -ECONNRESET: /* Responded, but call expired. */ if (e->responded) return; diff --git a/fs/afs/proc.c b/fs/afs/proc.c index e817fc740ba019bfb27b1e6d9af5a538f1152d79..e8babb62ed44230d2a9de5dc6db3ad0b50bc8710 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -310,6 +310,11 @@ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v) alist->preferred == i ? 
'>' : '-', &alist->addrs[i].transport); } + seq_printf(m, " info: fl=%lx rtt=%d\n", vlserver->flags, vlserver->rtt); + seq_printf(m, " probe: fl=%x e=%d ac=%d out=%d\n", + vlserver->probe.flags, vlserver->probe.error, + vlserver->probe.abort_code, + atomic_read(&vlserver->probe_outstanding)); return 0; } diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c index 6a0935cb822f3a5e2c65af9e1106ba2757f9efbc..d83f13c44b9213878ddb20ad4c914152994565d5 100644 --- a/fs/afs/rotate.c +++ b/fs/afs/rotate.c @@ -281,7 +281,7 @@ bool afs_select_fileserver(struct afs_operation *op) case -ETIME: if (op->error != -EDESTADDRREQ) goto iterate_address; - /* Fall through */ + fallthrough; case -ERFKILL: case -EADDRNOTAVAIL: case -ENETUNREACH: diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 8fc8fb406a5a2b26c73995db2e4670709292670b..8be709cb8542aab247fb390063a51bef8eb8849c 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -568,7 +568,7 @@ static void afs_deliver_to_call(struct afs_call *call) case -EIO: pr_err("kAFS: Call %u in bad state %u\n", call->debug_id, state); - /* Fall through */ + fallthrough; case -ENODATA: case -EBADMSG: case -EMSGSIZE: @@ -669,7 +669,7 @@ long afs_wait_for_call_to_complete(struct afs_call *call, ret = call->ret0; call->ret0 = 0; - /* Fall through */ + fallthrough; case -ECONNABORTED: ac->responded = true; break; @@ -872,7 +872,7 @@ void afs_send_empty_reply(struct afs_call *call) _debug("oom"); rxrpc_kernel_abort_call(net->socket, call->rxcall, RX_USER_ABORT, -ENOMEM, "KOO"); - /* Fall through */ + fallthrough; default: _leave(" [error]"); return; diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c index 8fea54eba0c2b2c76f2cb0717b0be67ecad6a6cd..38b2ba1d9ec0dfb3d5e98ad3ab3623089a6425db 100644 --- a/fs/afs/vl_list.c +++ b/fs/afs/vl_list.c @@ -21,6 +21,7 @@ struct afs_vlserver *afs_alloc_vlserver(const char *name, size_t name_len, rwlock_init(&vlserver->lock); init_waitqueue_head(&vlserver->probe_wq); spin_lock_init(&vlserver->probe_lock); + vlserver->rtt = UINT_MAX; vlserver->name_len = name_len; vlserver->port = port; memcpy(vlserver->name, name, name_len); diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c index e3aa013c21779241e399014b504a3978e5fe7c35..d1c7068b4346f409d954d5d3ecc4ef28cc9e00e5 100644 --- a/fs/afs/vl_probe.c +++ b/fs/afs/vl_probe.c @@ -11,15 +11,33 @@ #include "internal.h" #include "protocol_yfs.h" -static bool afs_vl_probe_done(struct afs_vlserver *server) + +/* + * Handle the completion of a set of probes. + */ +static void afs_finished_vl_probe(struct afs_vlserver *server) { - if (!atomic_dec_and_test(&server->probe_outstanding)) - return false; + if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) { + server->rtt = UINT_MAX; + clear_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags); + } - wake_up_var(&server->probe_outstanding); clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags); wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING); - return true; +} + +/* + * Handle the completion of a probe RPC call. 
+ */ +static void afs_done_one_vl_probe(struct afs_vlserver *server, bool wake_up) +{ + if (atomic_dec_and_test(&server->probe_outstanding)) { + afs_finished_vl_probe(server); + wake_up = true; + } + + if (wake_up) + wake_up_all(&server->probe_wq); } /* @@ -45,15 +63,20 @@ void afs_vlserver_probe_result(struct afs_call *call) server->probe.error = 0; goto responded; case -ECONNABORTED: - if (!server->probe.responded) { + if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) { server->probe.abort_code = call->abort_code; server->probe.error = ret; } goto responded; case -ENOMEM: case -ENONET: - server->probe.local_failure = true; - afs_io_error(call, afs_io_error_vl_probe_fail); + case -EKEYEXPIRED: + case -EKEYREVOKED: + case -EKEYREJECTED: + server->probe.flags |= AFS_VLSERVER_PROBE_LOCAL_FAILURE; + if (server->probe.error == 0) + server->probe.error = ret; + trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail); goto out; case -ECONNRESET: /* Responded, but call expired. */ case -ERFKILL: @@ -67,12 +90,12 @@ void afs_vlserver_probe_result(struct afs_call *call) default: clear_bit(index, &alist->responded); set_bit(index, &alist->failed); - if (!server->probe.responded && + if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) && (server->probe.error == 0 || server->probe.error == -ETIMEDOUT || server->probe.error == -ETIME)) server->probe.error = ret; - afs_io_error(call, afs_io_error_vl_probe_fail); + trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail); goto out; } @@ -81,39 +104,36 @@ void afs_vlserver_probe_result(struct afs_call *call) clear_bit(index, &alist->failed); if (call->service_id == YFS_VL_SERVICE) { - server->probe.is_yfs = true; + server->probe.flags |= AFS_VLSERVER_PROBE_IS_YFS; set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags); alist->addrs[index].srx_service = call->service_id; } else { - server->probe.not_yfs = true; - if (!server->probe.is_yfs) { + server->probe.flags |= AFS_VLSERVER_PROBE_NOT_YFS; + if (!(server->probe.flags & AFS_VLSERVER_PROBE_IS_YFS)) { clear_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags); alist->addrs[index].srx_service = call->service_id; } } - rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); - if (rtt_us < server->probe.rtt) { + if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && + rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; + server->rtt = rtt_us; alist->preferred = index; - have_result = true; } smp_wmb(); /* Set rtt before responded. 
*/ - server->probe.responded = true; + server->probe.flags |= AFS_VLSERVER_PROBE_RESPONDED; set_bit(AFS_VLSERVER_FL_PROBED, &server->flags); + set_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags); + have_result = true; out: spin_unlock(&server->probe_lock); _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", server_index, index, &alist->addrs[index].transport, rtt_us, ret); - have_result |= afs_vl_probe_done(server); - if (have_result) { - server->probe.have_result = true; - wake_up_var(&server->probe.have_result); - wake_up_all(&server->probe_wq); - } + afs_done_one_vl_probe(server, have_result); } /* @@ -151,11 +171,10 @@ static bool afs_do_probe_vlserver(struct afs_net *net, in_progress = true; } else { afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code); + afs_done_one_vl_probe(server, false); } } - if (!in_progress) - afs_vl_probe_done(server); return in_progress; } @@ -193,7 +212,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist, { struct wait_queue_entry *waits; struct afs_vlserver *server; - unsigned int rtt = UINT_MAX; + unsigned int rtt = UINT_MAX, rtt_s; bool have_responders = false; int pref = -1, i; @@ -205,7 +224,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist, server = vllist->servers[i].server; if (!test_bit(AFS_VLSERVER_FL_PROBING, &server->flags)) __clear_bit(i, &untried); - if (server->probe.responded) + if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) have_responders = true; } } @@ -231,7 +250,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist, for (i = 0; i < vllist->nr_servers; i++) { if (test_bit(i, &untried)) { server = vllist->servers[i].server; - if (server->probe.responded) + if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) goto stop; if (test_bit(AFS_VLSERVER_FL_PROBING, &server->flags)) still_probing = true; @@ -249,10 +268,11 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist, for (i = 0; i < vllist->nr_servers; i++) { if (test_bit(i, &untried)) { server = vllist->servers[i].server; - if (server->probe.responded && - server->probe.rtt < rtt) { + rtt_s = READ_ONCE(server->rtt); + if (test_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags) && + rtt_s < rtt) { pref = i; - rtt = server->probe.rtt; + rtt = rtt_s; } remove_wait_queue(&server->probe_wq, &waits[i]); diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c index f405ca8b240a5ba7639361a1e55d7fad2b6c845b..c0458c903b310e630e9fe907a780785382cc0bf5 100644 --- a/fs/afs/vl_rotate.c +++ b/fs/afs/vl_rotate.c @@ -192,7 +192,8 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc) for (i = 0; i < vc->server_list->nr_servers; i++) { struct afs_vlserver *s = vc->server_list->servers[i].server; - if (!test_bit(i, &vc->untried) || !s->probe.responded) + if (!test_bit(i, &vc->untried) || + !test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags)) continue; if (s->probe.rtt < rtt) { vc->index = i; @@ -262,10 +263,14 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc) for (i = 0; i < vc->server_list->nr_servers; i++) { struct afs_vlserver *s = vc->server_list->servers[i].server; + if (test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags)) + e.responded = true; afs_prioritise_error(&e, READ_ONCE(s->probe.error), s->probe.abort_code); } + error = e.error; + failed_set_error: vc->error = error; failed: diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index fd82850cd4244efa5e4daa77d496d7952eca43b4..dc9327332f069067fe1204cab325916054e060bf 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c @@ -196,7 +196,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call) /* 
Extract the returned uuid, uniquifier, nentries and * blkaddrs size */ - /* Fall through */ + fallthrough; case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -221,7 +221,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call) count = min(call->count, 4U); afs_extract_to_buf(call, count * sizeof(__be32)); - /* Fall through - and extract entries */ + fallthrough; /* and extract entries */ case 2: ret = afs_extract_data(call, call->count > 4); if (ret < 0) @@ -324,7 +324,7 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through - and extract the capabilities word count */ + fallthrough; /* and extract the capabilities word count */ case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -337,7 +337,7 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call) call->unmarshall++; afs_extract_discard(call, count * sizeof(__be32)); - /* Fall through - and extract capabilities words */ + fallthrough; /* and extract capabilities words */ case 2: ret = afs_extract_data(call, false); if (ret < 0) @@ -436,7 +436,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) /* Extract the returned uuid, uniquifier, fsEndpoints count and * either the first fsEndpoint type or the volEndpoints * count if there are no fsEndpoints. */ - /* Fall through */ + fallthrough; case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -475,7 +475,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) afs_extract_to_buf(call, size); call->unmarshall = 2; - /* Fall through - and extract fsEndpoints[] entries */ + fallthrough; /* and extract fsEndpoints[] entries */ case 2: ret = afs_extract_data(call, true); if (ret < 0) @@ -526,7 +526,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) * extract the type of the next endpoint when we extract the * data of the current one, but this is the first... 
*/ - /* Fall through */ + fallthrough; case 3: ret = afs_extract_data(call, true); if (ret < 0) @@ -552,7 +552,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) afs_extract_to_buf(call, size); call->unmarshall = 4; - /* Fall through - and extract volEndpoints[] entries */ + fallthrough; /* and extract volEndpoints[] entries */ case 4: ret = afs_extract_data(call, true); if (ret < 0) @@ -587,7 +587,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) afs_extract_discard(call, 0); call->unmarshall = 5; - /* Fall through - Done */ + fallthrough; /* Done */ case 5: ret = afs_extract_data(call, false); if (ret < 0) @@ -663,7 +663,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through - and extract the cell name length */ + fallthrough; /* and extract the cell name length */ case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -685,7 +685,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call) afs_extract_begin(call, cell_name, namesz); call->unmarshall++; - /* Fall through - and extract cell name */ + fallthrough; /* and extract cell name */ case 2: ret = afs_extract_data(call, true); if (ret < 0) @@ -694,7 +694,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call) afs_extract_discard(call, call->count2); call->unmarshall++; - /* Fall through - and extract padding */ + fallthrough; /* and extract padding */ case 3: ret = afs_extract_data(call, false); if (ret < 0) diff --git a/fs/afs/write.c b/fs/afs/write.c index a121c247d95a3d63757cfd9e30557c41faa542e2..4b2265cb18917b5e28078b9b61ca098fab6139a8 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -609,7 +609,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, default: pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret); - /* Fall through */ + fallthrough; case -EACCES: case -EPERM: case -ENOKEY: diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 8c24fdc899e378bc63b77272902c33c9dbad2eea..3b1239b7e90d8cffaa957e30a507dad2b8174afd 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -373,7 +373,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) req->offset = req->pos & (PAGE_SIZE - 1); afs_extract_to_tmp64(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the returned data length */ case 1: @@ -401,7 +401,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) call->bvec[0].bv_page = req->pages[req->index]; iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size); ASSERTCMP(size, <=, PAGE_SIZE); - /* Fall through */ + fallthrough; /* extract the returned data */ case 2: @@ -428,7 +428,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) /* Discard any excess data the server gave us */ afs_extract_discard(call, req->actual_len - req->len); call->unmarshall = 3; - /* Fall through */ + fallthrough; case 3: _debug("extract discard %zu/%llu", @@ -444,7 +444,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); - /* Fall through */ + fallthrough; /* extract the metadata */ case 4: @@ -461,7 +461,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) req->file_size = vp->scb.status.size; call->unmarshall++; - /* Fall through */ + fallthrough; case 5: break; @@ -1262,7 +1262,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) 
case 0: call->unmarshall++; afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus)); - /* Fall through */ + fallthrough; /* extract the returned status record */ case 1: @@ -1275,7 +1275,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) xdr_decode_YFSFetchVolumeStatus(&bp, &op->volstatus.vs); call->unmarshall++; afs_extract_to_tmp(call); - /* Fall through */ + fallthrough; /* extract the volume name length */ case 2: @@ -1290,7 +1290,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the volume name */ case 3: @@ -1304,7 +1304,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) _debug("volname '%s'", p); afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the offline message length */ case 4: @@ -1319,7 +1319,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the offline message */ case 5: @@ -1334,7 +1334,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the message of the day length */ case 6: @@ -1349,7 +1349,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the message of the day */ case 7: @@ -1363,7 +1363,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) _debug("motd '%s'", p); call->unmarshall++; - /* Fall through */ + fallthrough; case 8: break; @@ -1622,7 +1622,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the file status count and array in two steps */ case 1: @@ -1640,7 +1640,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_counts: afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus)); - /* Fall through */ + fallthrough; case 2: _debug("extract status array %u", call->count); @@ -1670,7 +1670,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) call->count = 0; call->unmarshall++; afs_extract_to_tmp(call); - /* Fall through */ + fallthrough; /* Extract the callback count and array in two steps */ case 3: @@ -1687,7 +1687,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_cbs: afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack)); - /* Fall through */ + fallthrough; case 4: _debug("extract CB array"); @@ -1716,7 +1716,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync)); call->unmarshall++; - /* Fall through */ + fallthrough; case 5: ret = afs_extract_data(call, false); @@ -1727,7 +1727,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) xdr_decode_YFSVolSync(&bp, &op->volsync); call->unmarshall++; - /* Fall through */ + fallthrough; case 6: break; @@ -1804,7 +1804,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall 
through */ + fallthrough; /* Extract the file ACL length */ case 1: @@ -1826,7 +1826,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) afs_extract_discard(call, size); } call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the file ACL */ case 2: @@ -1836,7 +1836,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the volume ACL length */ case 3: @@ -1858,7 +1858,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) afs_extract_discard(call, size); } call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the volume ACL */ case 4: @@ -1871,7 +1871,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the metadata */ case 5: @@ -1886,7 +1886,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) xdr_decode_YFSVolSync(&bp, &op->volsync); call->unmarshall++; - /* Fall through */ + fallthrough; case 6: break; diff --git a/fs/aio.c b/fs/aio.c index 5736bff48e9e9cbfacd9fbbe4738d0643c90e067..d5ec303855669de4e90d7b810c73333d089486fb 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1511,7 +1511,7 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret) * may be already running. Just fail this IO with EINTR. */ ret = -EINTR; - /*FALLTHRU*/ + fallthrough; default: req->ki_complete(req, ret, 0); } diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index f2f9086ebe983269b1611ccaaf9a764f29ab4209..b9c658e0548eb85328be7efcd7285accecbe6535 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -576,7 +576,7 @@ static int load_flat_file(struct linux_binprm *bprm, goto err; } - len = data_len + extra; + len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); len = PAGE_ALIGN(len); realdatastart = vm_mmap(NULL, 0, len, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); @@ -590,7 +590,9 @@ static int load_flat_file(struct linux_binprm *bprm, vm_munmap(textpos, text_len); goto err; } - datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN); + datapos = ALIGN(realdatastart + + MAX_SHARED_LIBS * sizeof(unsigned long), + FLAT_DATA_ALIGN); pr_debug("Allocated data+bss+stack (%u bytes): %lx\n", data_len + bss_len + stack_len, datapos); @@ -620,7 +622,7 @@ static int load_flat_file(struct linux_binprm *bprm, memp_size = len; } else { - len = text_len + data_len + extra; + len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(u32); len = PAGE_ALIGN(len); textpos = vm_mmap(NULL, 0, len, PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); @@ -635,7 +637,9 @@ static int load_flat_file(struct linux_binprm *bprm, } realdatastart = textpos + ntohl(hdr->data_start); - datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN); + datapos = ALIGN(realdatastart + + MAX_SHARED_LIBS * sizeof(u32), + FLAT_DATA_ALIGN); reloc = (__be32 __user *) (datapos + (ntohl(hdr->reloc_start) - text_len)); @@ -652,9 +656,8 @@ static int load_flat_file(struct linux_binprm *bprm, (text_len + full_data - sizeof(struct flat_hdr)), 0); - if (datapos != realdatastart) - memmove((void *)datapos, (void *)realdatastart, - full_data); + memmove((void *) datapos, (void *) realdatastart, + full_data); #else /* * This is used on MMU systems mainly for testing. 
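The binfmt_flat hunks here all serve one change: load_flat_file() now reserves MAX_SHARED_LIBS * sizeof(unsigned long) bytes in front of the data segment, both when sizing the vm_mmap() allocation and when computing datapos, and the memmove() of the freshly read data becomes unconditional because datapos now always sits above realdatastart. A minimal userspace sketch of that size/alignment arithmetic follows; PAGE_SIZE, FLAT_DATA_ALIGN, MAX_SHARED_LIBS and the mapping address are illustrative assumptions, not values taken from the patch:

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
	#define FLAT_DATA_ALIGN	0x20UL	/* illustrative, not the kernel's value */
	#define MAX_SHARED_LIBS	4	/* illustrative */

	int main(void)
	{
		unsigned long data_len = 60000, extra = 8192;

		/* Size the mapping with room for the per-library slots. */
		unsigned long len = PAGE_ALIGN(data_len + extra +
					MAX_SHARED_LIBS * sizeof(unsigned long));

		/* Stand-in for the address vm_mmap() would return. */
		unsigned long realdatastart = 0x40000000UL;

		/* Data begins above the reserved slots, suitably aligned. */
		unsigned long datapos = ALIGN(realdatastart +
					MAX_SHARED_LIBS * sizeof(unsigned long),
					FLAT_DATA_ALIGN);

		printf("map len=%lu realdatastart=%#lx datapos=%#lx\n",
		       len, realdatastart, datapos);
		return 0;
	}

Because datapos - realdatastart is now always at least MAX_SHARED_LIBS words, the old "if (datapos != realdatastart)" guard around the memmove() could never be false, which is why the patch drops it.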
@@ -710,7 +713,8 @@ static int load_flat_file(struct linux_binprm *bprm, if (IS_ERR_VALUE(result)) { ret = result; pr_err("Unable to read code+data+bss, errno %d\n", ret); - vm_munmap(textpos, text_len + data_len + extra); + vm_munmap(textpos, text_len + data_len + extra + + MAX_SHARED_LIBS * sizeof(u32)); goto err; } } diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 613920c17ac1ae0e153262210cc5c8c05f5d3770..ea8aaf36647eeae682c89f2c70b9207922002ff2 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1798,7 +1798,6 @@ static struct btrfs_block_group *btrfs_create_block_group_cache( cache->fs_info = fs_info; cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); - set_free_space_tree_thresholds(cache); cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; @@ -1912,6 +1911,8 @@ static int read_one_block_group(struct btrfs_fs_info *info, if (ret < 0) goto error; + set_free_space_tree_thresholds(cache); + if (need_clear) { /* * When we mount with old space cache, we need to @@ -2132,6 +2133,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, return -ENOMEM; cache->length = size; + set_free_space_tree_thresholds(cache); cache->used = bytes_used; cache->flags = type; cache->last_byte_to_unpin = (u64)-1; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 70e49d8d4f6c3154e53b217a76c3dd6396725691..cd392da69b8190a69071e041e7802452fa7588f4 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -68,7 +68,7 @@ const char *btrfs_super_csum_driver(u16 csum_type) btrfs_csums[csum_type].name; } -size_t __const btrfs_get_num_csums(void) +size_t __attribute_const__ btrfs_get_num_csums(void) { return ARRAY_SIZE(btrfs_csums); } @@ -1297,6 +1297,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin), + eb_rewin, btrfs_header_level(eb_rewin)); btrfs_tree_read_lock(eb_rewin); __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); WARN_ON(btrfs_header_nritems(eb_rewin) > @@ -1370,7 +1372,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq) if (!eb) return NULL; - btrfs_tree_read_lock(eb); if (old_root) { btrfs_set_header_bytenr(eb, eb->start); btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); @@ -1378,6 +1379,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq) btrfs_set_header_level(eb, old_root->level); btrfs_set_header_generation(eb, old_generation); } + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb, + btrfs_header_level(eb)); + btrfs_tree_read_lock(eb); if (tm) __tree_mod_log_rewind(fs_info, eb, time_seq, tm); else diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 9c7e466f27a9a366c8e3c9f8058581fa58ed0e11..9a72896bed2ee464140734188771a9b319c46b92 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2262,7 +2262,7 @@ BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, int btrfs_super_csum_size(const struct btrfs_super_block *s); const char *btrfs_super_csum_name(u16 csum_type); const char *btrfs_super_csum_driver(u16 csum_type); -size_t __const btrfs_get_num_csums(void); +size_t __attribute_const__ btrfs_get_num_csums(void); /* @@ -2518,7 +2518,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes); int btrfs_exclude_logged_extents(struct extent_buffer *eb); int btrfs_cross_ref_exist(struct btrfs_root *root, - u64 objectid, u64 offset, u64 bytenr); + u64 objectid, u64 offset, u64 
bytenr, bool strict); struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 parent, u64 root_objectid, @@ -2934,7 +2934,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, u64 start, u64 len); noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, u64 *orig_start, u64 *orig_block_len, - u64 *ram_bytes); + u64 *ram_bytes, bool strict); void __btrfs_del_delalloc_inode(struct btrfs_root *root, struct btrfs_inode *inode); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9ae25f6321579da3b0a982333c848d38d6336e39..abf86b202b4318cc8687ddd6138e9839c9ae32fc 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3418,6 +3418,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device btrfs_put_block_group_cache(fs_info); fail_tree_roots: + if (fs_info->data_reloc_root) + btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root); free_root_pointers(fs_info, true); invalidate_inode_pages2(fs_info->btree_inode->i_mapping); @@ -4551,6 +4553,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache) cache->io_ctl.inode = NULL; iput(inode); } + ASSERT(cache->io_ctl.pages == NULL); btrfs_put_block_group(cache); } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index de6fe176fdfb3400c4b0786c9b032f972069d20e..780b9c9a98fe37ae4bed0c93abf3ae414026c1dd 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -400,12 +400,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, if (type == BTRFS_SHARED_BLOCK_REF_KEY) { ASSERT(eb->fs_info); /* - * Every shared one has parent tree - * block, which must be aligned to - * nodesize. + * Every shared one has parent tree block, + * which must be aligned to sector size. */ if (offset && - IS_ALIGNED(offset, eb->fs_info->nodesize)) + IS_ALIGNED(offset, eb->fs_info->sectorsize)) return type; } } else if (is_data == BTRFS_REF_TYPE_DATA) { @@ -414,12 +413,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, if (type == BTRFS_SHARED_DATA_REF_KEY) { ASSERT(eb->fs_info); /* - * Every shared one has parent tree - * block, which must be aligned to - * nodesize. + * Every shared one has parent tree block, + * which must be aligned to sector size. 
*/ if (offset && - IS_ALIGNED(offset, eb->fs_info->nodesize)) + IS_ALIGNED(offset, eb->fs_info->sectorsize)) return type; } } else { @@ -429,8 +427,9 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, } btrfs_print_leaf((struct extent_buffer *)eb); - btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d", - eb->start, type); + btrfs_err(eb->fs_info, + "eb %llu iref 0x%lx invalid extent inline ref type %d", + eb->start, (unsigned long)iref, type); WARN_ON(1); return BTRFS_REF_TYPE_INVALID; @@ -2306,7 +2305,8 @@ static noinline int check_delayed_ref(struct btrfs_root *root, static noinline int check_committed_ref(struct btrfs_root *root, struct btrfs_path *path, - u64 objectid, u64 offset, u64 bytenr) + u64 objectid, u64 offset, u64 bytenr, + bool strict) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_root *extent_root = fs_info->extent_root; @@ -2348,9 +2348,13 @@ static noinline int check_committed_ref(struct btrfs_root *root, btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) goto out; - /* If extent created before last snapshot => it's definitely shared */ - if (btrfs_extent_generation(leaf, ei) <= - btrfs_root_last_snapshot(&root->root_item)) + /* + * If extent created before last snapshot => it's shared unless the + * snapshot has been deleted. Use the heuristic if strict is false. + */ + if (!strict && + (btrfs_extent_generation(leaf, ei) <= + btrfs_root_last_snapshot(&root->root_item))) goto out; iref = (struct btrfs_extent_inline_ref *)(ei + 1); @@ -2375,7 +2379,7 @@ static noinline int check_committed_ref(struct btrfs_root *root, } int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, - u64 bytenr) + u64 bytenr, bool strict) { struct btrfs_path *path; int ret; @@ -2386,7 +2390,7 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, do { ret = check_committed_ref(root, path, objectid, - offset, bytenr); + offset, bytenr, strict); if (ret && ret != -ENOENT) goto out; @@ -4522,7 +4526,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, return ERR_PTR(-EUCLEAN); } - btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level); + btrfs_set_buffer_lockdep_class(owner, buf, level); btrfs_tree_lock(buf); btrfs_clean_tree_block(buf); clear_bit(EXTENT_BUFFER_STALE, &buf->bflags); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 6def411b2ebad7f506cff6b91e2dc9d4c2a0eacf..a940edb1e64f2e0f895af5a3defaafcfc47b90e3 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -5655,9 +5655,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv, } } -int read_extent_buffer_to_user(const struct extent_buffer *eb, - void __user *dstv, - unsigned long start, unsigned long len) +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, + void __user *dstv, + unsigned long start, unsigned long len) { size_t cur; size_t offset; @@ -5677,7 +5677,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb, cur = min(len, (PAGE_SIZE - offset)); kaddr = page_address(page); - if (copy_to_user(dst, kaddr + offset, cur)) { + if (copy_to_user_nofault(dst, kaddr + offset, cur)) { ret = -EFAULT; break; } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 00a88f2eb5ab6d50301c0bbf116ad39921039e10..30794ae58498bbc50736796c1a233a297f193c76 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -241,9 +241,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, void 
read_extent_buffer(const struct extent_buffer *eb, void *dst, unsigned long start, unsigned long len); -int read_extent_buffer_to_user(const struct extent_buffer *eb, - void __user *dst, unsigned long start, - unsigned long len); +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, + void __user *dst, unsigned long start, + unsigned long len); void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src); void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb, const void *src); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index bb824c7cb7c7d6adc32a10047c638ee7bc35fee2..4507c3d093994f26cac6386f415d632d6a0098a3 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1571,7 +1571,7 @@ static int check_can_nocow(struct btrfs_inode *inode, loff_t pos, } ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes, - NULL, NULL, NULL); + NULL, NULL, NULL, false); if (ret <= 0) { ret = 0; if (!nowait) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index ef0fd7afb0b1e8f45674bfd94044aef24a7a2c57..dc82fd0c80cbbab11cabaec665f214074107cff9 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1186,7 +1186,6 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root, ret = update_cache_item(trans, root, inode, path, offset, io_ctl->entries, io_ctl->bitmaps); out: - io_ctl_free(io_ctl); if (ret) { invalidate_inode_pages2(inode->i_mapping); BTRFS_I(inode)->generation = 0; @@ -1347,6 +1346,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, * them out later */ io_ctl_drop_pages(io_ctl); + io_ctl_free(io_ctl); unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, &cached_state); diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 8b1f5c8897b750a6c740922e16833117d05ae3e2..6b9faf3b0e96763b897e941051c7c19ded93d2bc 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -22,6 +22,10 @@ void set_free_space_tree_thresholds(struct btrfs_block_group *cache) size_t bitmap_size; u64 num_bitmaps, total_bitmap_size; + if (WARN_ON(cache->length == 0)) + btrfs_warn(cache->fs_info, "block group %llu length is zero", + cache->start); + /* * We convert to bitmaps when the disk space required for using extents * exceeds that required for using bitmaps. 
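This free-space-tree change pairs with the block-group.c hunks earlier in the diff: set_free_space_tree_thresholds() is no longer called from btrfs_create_block_group_cache(), where cache->length was still zero, but only once the length is known, and it now warns if it still sees a zero length, since any extent-versus-bitmap threshold derived from an empty range is meaningless. A hedged sketch of that failure mode; the helper and formula below are illustrative stand-ins, not btrfs's actual computation:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Illustrative threshold: how many bitmaps a block group would need,
	 * pretending one bitmap covers sectorsize * 8 bytes of group space.
	 */
	static uint64_t nr_bitmaps(uint64_t group_length, uint32_t sectorsize)
	{
		uint64_t range = (uint64_t)sectorsize * 8;

		return (group_length + range - 1) / range;
	}

	int main(void)
	{
		/* Called before the length is filled in (the old ordering): */
		printf("len=0    -> %llu bitmaps (degenerate)\n",
		       (unsigned long long)nr_bitmaps(0, 4096));

		/* Called once cache->length is set, as the patch arranges: */
		printf("len=1GiB -> %llu bitmaps\n",
		       (unsigned long long)nr_bitmaps(1ULL << 30, 4096));
		return 0;
	}

With a zero length every derived threshold collapses to zero, which is why the WARN_ON was added as a backstop on top of reordering the calls.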
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 51fcd82d41c0ebec1fceb671dc23e40492941e6f..9570458aa8471dd4d442de81e2e3771f2823921e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1610,7 +1610,7 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode, goto out_check; ret = btrfs_cross_ref_exist(root, ino, found_key.offset - - extent_offset, disk_bytenr); + extent_offset, disk_bytenr, false); if (ret) { /* * ret could be -EIO if the above fails to read @@ -2161,11 +2161,8 @@ static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio, u64 bio_offset) { struct inode *inode = private_data; - blk_status_t ret = 0; - ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0); - BUG_ON(ret); /* -ENOMEM */ - return 0; + return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0); } /* @@ -6953,6 +6950,8 @@ static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, * @orig_start: (optional) Return the original file offset of the file extent * @orig_len: (optional) Return the original on-disk length of the file extent * @ram_bytes: (optional) Return the ram_bytes of the file extent + * @strict: if true, omit optimizations that might force us into unnecessary + * cow. e.g., don't trust generation number. * * This function will flush ordered extents in the range to ensure proper * nocow checks for (nowait == false) case. @@ -6967,7 +6966,7 @@ static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, */ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, u64 *orig_start, u64 *orig_block_len, - u64 *ram_bytes) + u64 *ram_bytes, bool strict) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_path *path; @@ -7045,8 +7044,9 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, * Do the same check as in btrfs_cross_ref_exist but without the * unnecessary search. 
*/ - if (btrfs_file_extent_generation(leaf, fi) <= - btrfs_root_last_snapshot(&root->root_item)) + if (!strict && + (btrfs_file_extent_generation(leaf, fi) <= + btrfs_root_last_snapshot(&root->root_item))) goto out; backref_offset = btrfs_file_extent_offset(leaf, fi); @@ -7082,7 +7082,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, */ ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)), - key.offset - backref_offset, disk_bytenr); + key.offset - backref_offset, disk_bytenr, + strict); if (ret) { ret = 0; goto out; } @@ -7303,7 +7304,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, block_start = em->block_start + (start - em->start); if (can_nocow_extent(inode, start, &len, &orig_start, - &orig_block_len, &ram_bytes) == 1 && + &orig_block_len, &ram_bytes, false) == 1 && btrfs_inc_nocow_writers(fs_info, block_start)) { struct extent_map *em2; @@ -7619,10 +7620,8 @@ static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data, struct bio *bio, u64 offset) { struct inode *inode = private_data; - blk_status_t ret; - ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, offset, 1); - BUG_ON(ret); /* -ENOMEM */ - return 0; + + return btrfs_csum_one_bio(BTRFS_I(inode), bio, offset, 1); } static void btrfs_end_dio_bio(struct bio *bio) @@ -10136,7 +10135,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, free_extent_map(em); em = NULL; - ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL); + ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true); if (ret < 0) { goto out; } else if (ret) { diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index bd3511c5ca8126f837f95ddb8ca5a7b7ff763b44..ac45f022b495886a995c06f9722ec3d71152e436 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2086,9 +2086,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, sh.len = item_len; sh.transid = found_transid; - /* copy search result header */ - if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) { - ret = -EFAULT; + /* + * Copy search result header. If we fault then loop again so we + * can fault in the pages and -EFAULT there if there's a + * problem. Otherwise we'll fault and then copy the buffer in + * properly this next time through. + */ + if (copy_to_user_nofault(ubuf + *sk_offset, &sh, sizeof(sh))) { + ret = 0; goto out; } @@ -2096,10 +2101,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, if (item_len) { char __user *up = ubuf + *sk_offset; - /* copy the item */ - if (read_extent_buffer_to_user(leaf, up, - item_off, item_len)) { - ret = -EFAULT; + /* + * Copy the item, same behavior as above, but reset the + * sk_offset so we copy the full thing again. + */ + if (read_extent_buffer_to_user_nofault(leaf, up, - item_off, item_len)) { + ret = 0; + *sk_offset -= sizeof(sh); goto out; } @@ -2184,6 +2193,10 @@ static noinline int search_ioctl(struct inode *inode, key.offset = sk->min_offset; while (1) { + ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset); + if (ret) + break; + ret = btrfs_search_forward(root, &key, path, sk->min_transid); if (ret != 0) { if (ret > 0) diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 61f44e78e3c9e7a61efc48fb5bae1cd91206cf88..80567c11ec122dc06826c19797a589e63e4e7be9 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -95,9 +95,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) * offset is supposed to be a tree block which * must be aligned to nodesize.
*/ - if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) - pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n", - offset, (unsigned long long)eb->fs_info->nodesize); + if (!IS_ALIGNED(offset, eb->fs_info->sectorsize)) + pr_info( + "\t\t\t(parent %llu not aligned to sectorsize %u)\n", + offset, eb->fs_info->sectorsize); break; case BTRFS_EXTENT_DATA_REF_KEY: dref = (struct btrfs_extent_data_ref *)(&iref->offset); @@ -112,8 +113,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) * must be aligned to nodesize. */ if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) - pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n", - offset, (unsigned long long)eb->fs_info->nodesize); + pr_info( + "\t\t\t(parent %llu not aligned to sectorsize %u)\n", + offset, eb->fs_info->sectorsize); break; default: pr_cont("(extent %llu has INVALID ref type %d)\n", diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 5a6cb9db512ebd0fd434792b58b63eb1195dacd5..354ab9985a3426b03ccafaa4fd2f1fb59a2629e0 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3716,50 +3716,84 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, return 0; } +static void scrub_workers_put(struct btrfs_fs_info *fs_info) +{ + if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt, + &fs_info->scrub_lock)) { + struct btrfs_workqueue *scrub_workers = NULL; + struct btrfs_workqueue *scrub_wr_comp = NULL; + struct btrfs_workqueue *scrub_parity = NULL; + + scrub_workers = fs_info->scrub_workers; + scrub_wr_comp = fs_info->scrub_wr_completion_workers; + scrub_parity = fs_info->scrub_parity_workers; + + fs_info->scrub_workers = NULL; + fs_info->scrub_wr_completion_workers = NULL; + fs_info->scrub_parity_workers = NULL; + mutex_unlock(&fs_info->scrub_lock); + + btrfs_destroy_workqueue(scrub_workers); + btrfs_destroy_workqueue(scrub_wr_comp); + btrfs_destroy_workqueue(scrub_parity); + } +} + /* * get a reference count on fs_info->scrub_workers. start worker if necessary */ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, int is_dev_replace) { + struct btrfs_workqueue *scrub_workers = NULL; + struct btrfs_workqueue *scrub_wr_comp = NULL; + struct btrfs_workqueue *scrub_parity = NULL; unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; int max_active = fs_info->thread_pool_size; + int ret = -ENOMEM; - lockdep_assert_held(&fs_info->scrub_lock); + if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt)) + return 0; - if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) { - ASSERT(fs_info->scrub_workers == NULL); - fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", - flags, is_dev_replace ? 1 : max_active, 4); - if (!fs_info->scrub_workers) - goto fail_scrub_workers; - - ASSERT(fs_info->scrub_wr_completion_workers == NULL); - fs_info->scrub_wr_completion_workers = - btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, - max_active, 2); - if (!fs_info->scrub_wr_completion_workers) - goto fail_scrub_wr_completion_workers; + scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags, + is_dev_replace ? 
1 : max_active, 4); + if (!scrub_workers) + goto fail_scrub_workers; - ASSERT(fs_info->scrub_parity_workers == NULL); - fs_info->scrub_parity_workers = - btrfs_alloc_workqueue(fs_info, "scrubparity", flags, + scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, max_active, 2); - if (!fs_info->scrub_parity_workers) - goto fail_scrub_parity_workers; + if (!scrub_wr_comp) + goto fail_scrub_wr_completion_workers; + scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags, + max_active, 2); + if (!scrub_parity) + goto fail_scrub_parity_workers; + + mutex_lock(&fs_info->scrub_lock); + if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) { + ASSERT(fs_info->scrub_workers == NULL && + fs_info->scrub_wr_completion_workers == NULL && + fs_info->scrub_parity_workers == NULL); + fs_info->scrub_workers = scrub_workers; + fs_info->scrub_wr_completion_workers = scrub_wr_comp; + fs_info->scrub_parity_workers = scrub_parity; refcount_set(&fs_info->scrub_workers_refcnt, 1); - } else { - refcount_inc(&fs_info->scrub_workers_refcnt); + mutex_unlock(&fs_info->scrub_lock); + return 0; } - return 0; + /* Other thread raced in and created the workers for us */ + refcount_inc(&fs_info->scrub_workers_refcnt); + mutex_unlock(&fs_info->scrub_lock); + ret = 0; + btrfs_destroy_workqueue(scrub_parity); fail_scrub_parity_workers: - btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers); + btrfs_destroy_workqueue(scrub_wr_comp); fail_scrub_wr_completion_workers: - btrfs_destroy_workqueue(fs_info->scrub_workers); + btrfs_destroy_workqueue(scrub_workers); fail_scrub_workers: - return -ENOMEM; + return ret; } int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, @@ -3770,9 +3804,6 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, int ret; struct btrfs_device *dev; unsigned int nofs_flag; - struct btrfs_workqueue *scrub_workers = NULL; - struct btrfs_workqueue *scrub_wr_comp = NULL; - struct btrfs_workqueue *scrub_parity = NULL; if (btrfs_fs_closing(fs_info)) return -EAGAIN; @@ -3819,13 +3850,17 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, if (IS_ERR(sctx)) return PTR_ERR(sctx); + ret = scrub_workers_get(fs_info, is_dev_replace); + if (ret) + goto out_free_ctx; + mutex_lock(&fs_info->fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) && !is_dev_replace)) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -ENODEV; - goto out_free_ctx; + goto out; } if (!is_dev_replace && !readonly && @@ -3834,7 +3869,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable", rcu_str_deref(dev->name)); ret = -EROFS; - goto out_free_ctx; + goto out; } mutex_lock(&fs_info->scrub_lock); @@ -3843,7 +3878,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EIO; - goto out_free_ctx; + goto out; } down_read(&fs_info->dev_replace.rwsem); @@ -3854,17 +3889,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, mutex_unlock(&fs_info->scrub_lock); mutex_unlock(&fs_info->fs_devices->device_list_mutex); ret = -EINPROGRESS; - goto out_free_ctx; + goto out; } up_read(&fs_info->dev_replace.rwsem); - ret = scrub_workers_get(fs_info, is_dev_replace); - if (ret) { - mutex_unlock(&fs_info->scrub_lock); - 
mutex_unlock(&fs_info->fs_devices->device_list_mutex); - goto out_free_ctx; - } - sctx->readonly = readonly; dev->scrub_ctx = sctx; mutex_unlock(&fs_info->fs_devices->device_list_mutex); @@ -3917,24 +3945,14 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, mutex_lock(&fs_info->scrub_lock); dev->scrub_ctx = NULL; - if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) { - scrub_workers = fs_info->scrub_workers; - scrub_wr_comp = fs_info->scrub_wr_completion_workers; - scrub_parity = fs_info->scrub_parity_workers; - - fs_info->scrub_workers = NULL; - fs_info->scrub_wr_completion_workers = NULL; - fs_info->scrub_parity_workers = NULL; - } mutex_unlock(&fs_info->scrub_lock); - btrfs_destroy_workqueue(scrub_workers); - btrfs_destroy_workqueue(scrub_wr_comp); - btrfs_destroy_workqueue(scrub_parity); + scrub_workers_put(fs_info); scrub_put_ctx(sctx); return ret; - +out: + scrub_workers_put(fs_info); out_free_ctx: scrub_free_ctx(sctx); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index e529ddb35b87f32bbc2363111f4dd19ade19d347..25967ecaaf0af97fca8ba95830bdfa5599996706 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -625,6 +625,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, } else if (strncmp(args[0].from, "lzo", 3) == 0) { compress_type = "lzo"; info->compress_type = BTRFS_COMPRESS_LZO; + info->compress_level = 0; btrfs_set_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, NODATACOW); btrfs_clear_opt(info->mount_opt, NODATASUM); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 20c6ac1a5de7fa3df7b0a8eef384c458c772f201..d2fc292ac61b1dcc06a9f34406fcccf4aa319a1d 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1636,6 +1636,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev); if (IS_ERR(pending->snap)) { ret = PTR_ERR(pending->snap); + pending->snap = NULL; btrfs_abort_transaction(trans, ret); goto fail; } diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 517b44300a05c940586ebcaa69e8881f21a722d0..7b1fee630f97851a35eb0bb34d33d95d1e31b991 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -984,7 +984,7 @@ static int check_inode_item(struct extent_buffer *leaf, /* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */ if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) { inode_item_err(leaf, slot, - "invalid inode generation: has %llu expect [0, %llu]", + "invalid inode transid: has %llu expect [0, %llu]", btrfs_inode_transid(leaf, iitem), super_gen + 1); return -EUCLEAN; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 696dd861cc3c63324a7488fcf31491ad42e46aac..39da9db352786071eaf57fbdd9cf1d36c1910c28 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3449,11 +3449,13 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, btrfs_free_path(path); out_unlock: mutex_unlock(&dir->log_mutex); - if (ret == -ENOSPC) { + if (err == -ENOSPC) { btrfs_set_log_full_commit(trans); - ret = 0; - } else if (ret < 0) - btrfs_abort_transaction(trans, ret); + err = 0; + } else if (err < 0 && err != -ENOENT) { + /* ENOENT can be returned if the entry hasn't been fsynced yet */ + btrfs_abort_transaction(trans, err); + } btrfs_end_log_trans(root); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ee96c5869f57e2aa2945325446e58e7002ca6341..117b43367629e418ae15a819c6a96b3d24488927 100644 --- a/fs/btrfs/volumes.c 
+++ b/fs/btrfs/volumes.c @@ -4,6 +4,7 @@ */ #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/bio.h> #include <linux/slab.h> #include <linux/buffer_head.h> @@ -4462,6 +4463,7 @@ int btrfs_uuid_scan_kthread(void *data) goto skip; } update_tree: + btrfs_release_path(path); if (!btrfs_is_empty_uuid(root_item.uuid)) { ret = btrfs_uuid_tree_add(trans, root_item.uuid, BTRFS_UUID_KEY_SUBVOL, @@ -4486,6 +4488,7 @@ int btrfs_uuid_scan_kthread(void *data) } skip: + btrfs_release_path(path); if (trans) { ret = btrfs_end_transaction(trans); trans = NULL; @@ -4493,7 +4496,6 @@ int btrfs_uuid_scan_kthread(void *data) break; } - btrfs_release_path(path); if (key.offset < (u64)-1) { key.offset++; } else if (key.type < BTRFS_ROOT_ITEM_KEY) { @@ -6483,8 +6485,17 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, u64 devid, u8 *dev_uuid) { struct btrfs_device *device; + unsigned int nofs_flag; + /* + * We call this under the chunk_mutex, so we want to use NOFS for this + * allocation, however we don't want to change btrfs_alloc_device() to + * always do NOFS because we use it in a lot of other GFP_KERNEL safe + * places. + */ + nofs_flag = memalloc_nofs_save(); device = btrfs_alloc_device(NULL, &devid, dev_uuid); + memalloc_nofs_restore(nofs_flag); if (IS_ERR(device)) return device; diff --git a/fs/buffer.c b/fs/buffer.c index 061dd202979d59036beb918bc13b2b9a4860e0fd..50bbc99e3d960d1bd4538589510540ed1e2f004d 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1958,7 +1958,7 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, */ set_buffer_new(bh); set_buffer_unwritten(bh); - /* FALLTHRU */ + fallthrough; case IOMAP_MAPPED: if ((iomap->flags & IOMAP_F_NEW) || offset >= i_size_read(inode)) @@ -3157,6 +3157,15 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) WARN_ON(atomic_read(&bh->b_count) < 1); lock_buffer(bh); if (test_clear_buffer_dirty(bh)) { + /* + * The bh should be mapped, but it might not be if the + * device was hot-removed. Not much we can do but fail the I/O. + */ + if (!buffer_mapped(bh)) { + unlock_buffer(bh); + return -EIO; + } + get_bh(bh); bh->b_end_io = end_buffer_write_sync; ret = submit_bh(REQ_OP_WRITE, op_flags, bh); diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 55ccccf77ceab50cba27ec82f9eb8cb29408ded0..034b3f4fdd3a7f201900298f47b295d5cce7b957 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -887,8 +887,8 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) int have = ci->i_snap_caps; if ((have & mask) == mask) { - dout("__ceph_caps_issued_mask ino 0x%lx snap issued %s" - " (mask %s)\n", ci->vfs_inode.i_ino, + dout("__ceph_caps_issued_mask ino 0x%llx snap issued %s" + " (mask %s)\n", ceph_ino(&ci->vfs_inode), ceph_cap_string(have), ceph_cap_string(mask)); return 1; @@ -899,8 +899,8 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) if (!__cap_is_valid(cap)) continue; if ((cap->issued & mask) == mask) { - dout("__ceph_caps_issued_mask ino 0x%lx cap %p issued %s" - " (mask %s)\n", ci->vfs_inode.i_ino, cap, + dout("__ceph_caps_issued_mask ino 0x%llx cap %p issued %s" + " (mask %s)\n", ceph_ino(&ci->vfs_inode), cap, ceph_cap_string(cap->issued), ceph_cap_string(mask)); if (touch) @@ -911,8 +911,8 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) /* does a combination of caps satisfy mask?
*/ have |= cap->issued; if ((have & mask) == mask) { - dout("__ceph_caps_issued_mask ino 0x%lx combo issued %s" - " (mask %s)\n", ci->vfs_inode.i_ino, + dout("__ceph_caps_issued_mask ino 0x%llx combo issued %s" + " (mask %s)\n", ceph_ino(&ci->vfs_inode), ceph_cap_string(cap->issued), ceph_cap_string(mask)); if (touch) { @@ -2872,7 +2872,7 @@ int ceph_get_caps(struct file *filp, int need, int want, struct cap_wait cw; DEFINE_WAIT_FUNC(wait, woken_wake_function); - cw.ino = inode->i_ino; + cw.ino = ceph_ino(inode); cw.tgid = current->tgid; cw.need = need; cw.want = want; diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 97539b497e4c419ccacb9c38a8c554e0eedcdc18..3e3fcda9b276cbf757a9e15d83ff4278ad01743c 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -202,7 +202,7 @@ static int caps_show_cb(struct inode *inode, struct ceph_cap *cap, void *p) { struct seq_file *s = p; - seq_printf(s, "0x%-17lx%-17s%-17s\n", inode->i_ino, + seq_printf(s, "0x%-17llx%-17s%-17s\n", ceph_ino(inode), ceph_cap_string(cap->issued), ceph_cap_string(cap->implemented)); return 0; @@ -247,7 +247,7 @@ static int caps_show(struct seq_file *s, void *p) spin_lock(&mdsc->caps_list_lock); list_for_each_entry(cw, &mdsc->cap_wait_list, list) { - seq_printf(s, "%-13d0x%-17lx%-17s%-17s\n", cw->tgid, cw->ino, + seq_printf(s, "%-13d0x%-17llx%-17s%-17s\n", cw->tgid, cw->ino, ceph_cap_string(cw->need), ceph_cap_string(cw->want)); } diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 060bdcc5ce32cfa873dc5b084c6caf02c20e9aca..d72e4a12bb69ada4c2b3739521f4d7f834ae23a6 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -259,9 +259,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx, dentry, dentry, d_inode(dentry)); ctx->pos = di->offset; if (!dir_emit(ctx, dentry->d_name.name, - dentry->d_name.len, - ceph_translate_ino(dentry->d_sb, - d_inode(dentry)->i_ino), + dentry->d_name.len, ceph_present_inode(d_inode(dentry)), d_inode(dentry)->i_mode >> 12)) { dput(dentry); err = 0; @@ -324,18 +322,21 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) /* always start with . and .. 
*/ if (ctx->pos == 0) { dout("readdir off 0 -> '.'\n"); - if (!dir_emit(ctx, ".", 1, - ceph_translate_ino(inode->i_sb, inode->i_ino), + if (!dir_emit(ctx, ".", 1, ceph_present_inode(inode), inode->i_mode >> 12)) return 0; ctx->pos = 1; } if (ctx->pos == 1) { - ino_t ino = parent_ino(file->f_path.dentry); + u64 ino; + struct dentry *dentry = file->f_path.dentry; + + spin_lock(&dentry->d_lock); + ino = ceph_present_inode(dentry->d_parent->d_inode); + spin_unlock(&dentry->d_lock); + dout("readdir off 1 -> '..'\n"); - if (!dir_emit(ctx, "..", 2, - ceph_translate_ino(inode->i_sb, ino), - inode->i_mode >> 12)) + if (!dir_emit(ctx, "..", 2, ino, inode->i_mode >> 12)) return 0; ctx->pos = 2; } @@ -507,9 +508,6 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) } for (; i < rinfo->dir_nr; i++) { struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i; - struct ceph_vino vino; - ino_t ino; - u32 ftype; BUG_ON(rde->offset < ctx->pos); @@ -519,13 +517,10 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) rde->name_len, rde->name, &rde->inode.in); BUG_ON(!rde->inode.in); - ftype = le32_to_cpu(rde->inode.in->mode) >> 12; - vino.ino = le64_to_cpu(rde->inode.in->ino); - vino.snap = le64_to_cpu(rde->inode.in->snapid); - ino = ceph_vino_to_ino(vino); if (!dir_emit(ctx, rde->name, rde->name_len, - ceph_translate_ino(inode->i_sb, ino), ftype)) { + ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)), + le32_to_cpu(rde->inode.in->mode) >> 12)) { dout("filldir stopping us...\n"); return 0; } @@ -1161,7 +1156,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry) if (try_async && op == CEPH_MDS_OP_UNLINK && (req->r_dir_caps = get_caps_for_async_unlink(dir, dentry))) { - dout("async unlink on %lu/%.*s caps=%s", dir->i_ino, + dout("async unlink on %llu/%.*s caps=%s", ceph_ino(dir), dentry->d_name.len, dentry->d_name.name, ceph_cap_string(req->r_dir_caps)); set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags); @@ -1745,7 +1740,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) case -ENOENT: if (d_really_is_negative(dentry)) valid = 1; - /* Fallthrough */ + fallthrough; default: break; } diff --git a/fs/ceph/file.c b/fs/ceph/file.c index d51c3f2fdca0258c0ab11c0c73d09d11a05eb409..3f4c993dfc6fec89267bf529ca89b7f5e36ba4e8 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -252,7 +252,7 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) case S_IFREG: ceph_fscache_register_inode_cookie(inode); ceph_fscache_file_set_cookie(inode, file); - /* fall through */ + fallthrough; case S_IFDIR: ret = ceph_init_file_info(inode, file, fmode, S_ISDIR(inode->i_mode)); @@ -630,8 +630,8 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry, } else { struct dentry *dn; - dout("%s d_adding new inode 0x%llx to 0x%lx/%s\n", __func__, - vino.ino, dir->i_ino, dentry->d_name.name); + dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__, + vino.ino, ceph_ino(dir), dentry->d_name.name); ceph_dir_clear_ordered(dir); ceph_init_inode_acls(inode, as_ctx); if (inode->i_state & I_NEW) { @@ -2507,6 +2507,7 @@ const struct file_operations ceph_file_fops = { .mmap = ceph_mmap, .fsync = ceph_fsync, .lock = ceph_lock, + .setlease = simple_nosetlease, .flock = ceph_flock, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 357c937699d567cea31929da1baedd3d30ab036d..d163fa96cb40164a5ff9d6fc3e79ff63d78655a5 100644 --- 
a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -41,8 +41,10 @@ static void ceph_inode_work(struct work_struct *work); */ static int ceph_set_ino_cb(struct inode *inode, void *data) { - ceph_inode(inode)->i_vino = *(struct ceph_vino *)data; - inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data); + struct ceph_inode_info *ci = ceph_inode(inode); + + ci->i_vino = *(struct ceph_vino *)data; + inode->i_ino = ceph_vino_to_ino_t(ci->i_vino); inode_set_iversion_raw(inode, 0); return 0; } @@ -50,17 +52,14 @@ static int ceph_set_ino_cb(struct inode *inode, void *data) struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino) { struct inode *inode; - ino_t t = ceph_vino_to_ino(vino); - inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino); + inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare, + ceph_set_ino_cb, &vino); if (!inode) return ERR_PTR(-ENOMEM); - if (inode->i_state & I_NEW) - dout("get_inode created new inode %p %llx.%llx ino %llx\n", - inode, ceph_vinop(inode), (u64)inode->i_ino); - dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino, - vino.snap, inode); + dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode), + ceph_vinop(inode), inode, !!(inode->i_state & I_NEW)); return inode; } @@ -2378,7 +2377,7 @@ int ceph_getattr(const struct path *path, struct kstat *stat, } generic_fillattr(inode, stat); - stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino); + stat->ino = ceph_present_inode(inode); /* * btime on newly-allocated inodes is 0, so if this is still set to diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index bc9e95937d7c6a4780a053666868010a51dfde6e..658800605bfb48f2e43fca54e6ef9d82225f8e93 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -372,7 +372,7 @@ struct ceph_quotarealm_inode { struct cap_wait { struct list_head list; - unsigned long ino; + u64 ino; pid_t tgid; int need; int want; diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c index 198ddde5c1e6ae38ff9428e7d6f820c9c8785df2..cc2c4d40b0222f77114666410c3fcc30350dd1b0 100644 --- a/fs/ceph/quota.c +++ b/fs/ceph/quota.c @@ -23,12 +23,12 @@ static inline bool ceph_has_realms_with_quotas(struct inode *inode) { struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; struct super_block *sb = mdsc->fsc->sb; + struct inode *root = d_inode(sb->s_root); if (atomic64_read(&mdsc->quotarealms_count) > 0) return true; /* if root is the real CephFS root, we don't have quota realms */ - if (sb->s_root->d_inode && - (sb->s_root->d_inode->i_ino == CEPH_INO_ROOT)) + if (root && ceph_ino(root) == CEPH_INO_ROOT) return false; /* otherwise, we can't know for sure */ return true; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 4c3c964b1c543493f3364a856f1165140e51450b..a3995ebe0623e950e4d1266b348d72f8ce3b3768 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -457,15 +457,7 @@ ceph_vino(const struct inode *inode) return ceph_inode(inode)->i_vino; } -/* - * ino_t is <64 bits on many architectures, blech. - * - * i_ino (kernel inode) st_ino (userspace) - * i386 32 32 - * x86_64+ino32 64 32 - * x86_64 64 64 - */ -static inline u32 ceph_ino_to_ino32(__u64 vino) +static inline u32 ceph_ino_to_ino32(u64 vino) { u32 ino = vino & 0xffffffff; ino ^= vino >> 32; @@ -475,34 +467,17 @@ static inline u32 ceph_ino_to_ino32(__u64 vino) } /* - * kernel i_ino value + * Inode numbers in cephfs are 64 bits, but inode->i_ino is 32-bits on + * some arches. 
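/*
 * Not from the patch: a standalone userspace sketch of the same fold
 * that ceph_ino_to_ino32() above performs -- xor the high 32 bits of
 * the 64-bit cephfs inode number into the low 32 bits, mapping a
 * folded value of 0 to 2. The test values are made up for illustration.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t fold_ino32(uint64_t vino)
{
	uint32_t ino = vino & 0xffffffff;

	ino ^= vino >> 32;
	if (!ino)
		ino = 2;
	return ino;
}

int main(void)
{
	assert(fold_ino32(0x123456789abcdef0ULL) == 0x88888888);
	assert(fold_ino32(0x0000000100000001ULL) == 2); /* 1 ^ 1 == 0 -> 2 */
	return 0;
}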
We generally do not use this value inside the ceph driver, but + * we do want to set it to something, so that generic vfs code has an + * appropriate value for tracepoints and the like. */ -static inline ino_t ceph_vino_to_ino(struct ceph_vino vino) +static inline ino_t ceph_vino_to_ino_t(struct ceph_vino vino) { -#if BITS_PER_LONG == 32 - return ceph_ino_to_ino32(vino.ino); -#else + if (sizeof(ino_t) == sizeof(u32)) + return ceph_ino_to_ino32(vino.ino); return (ino_t)vino.ino; -#endif -} - -/* - * user-visible ino (stat, filldir) - */ -#if BITS_PER_LONG == 32 -static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino) -{ - return ino; -} -#else -static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino) -{ - if (ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)) - ino = ceph_ino_to_ino32(ino); - return ino; } -#endif - /* for printf-style formatting */ #define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap @@ -511,11 +486,34 @@ static inline u64 ceph_ino(struct inode *inode) { return ceph_inode(inode)->i_vino.ino; } + static inline u64 ceph_snap(struct inode *inode) { return ceph_inode(inode)->i_vino.snap; } +/** + * ceph_present_ino - format an inode number for presentation to userland + * @sb: superblock where the inode lives + * @ino: inode number to (possibly) convert + * + * If the user mounted with the ino32 option, then the 64-bit value needs + * to be converted to something that can fit inside 32 bits. Note that + * internal kernel code never uses this value, so this is entirely for + * userland consumption. + */ +static inline u64 ceph_present_ino(struct super_block *sb, u64 ino) +{ + if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32))) + return ceph_ino_to_ino32(ino); + return ino; +} + +static inline u64 ceph_present_inode(struct inode *inode) +{ + return ceph_present_ino(inode->i_sb, ceph_ino(inode)); +} + static inline int ceph_ino_compare(struct inode *inode, void *data) { struct ceph_vino *pvino = (struct ceph_vino *)data; @@ -524,11 +522,16 @@ static inline int ceph_ino_compare(struct inode *inode, void *data) ci->i_vino.snap == pvino->snap; } + static inline struct inode *ceph_find_inode(struct super_block *sb, struct ceph_vino vino) { - ino_t t = ceph_vino_to_ino(vino); - return ilookup5(sb, t, ceph_ino_compare, &vino); + /* + * NB: The hashval will be run through the fs/inode.c hash function + * anyway, so there is no need to squash the inode number down to + * 32-bits first. Just use low-order bits on arches with 32-bit long. + */ + return ilookup5(sb, (unsigned long)vino.ino, ceph_ino_compare, &vino); } diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index b296964b8afa6c4319d07b0593bbcfbbcc97a833..b565d83ba89ed93e78bef0b32eb7aa9c136034ae 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -2031,4 +2031,19 @@ static inline bool is_smb1_server(struct TCP_Server_Info *server) return strcmp(server->vals->version_string, SMB1_VERSION_STRING) == 0; } +static inline bool is_tcon_dfs(struct cifs_tcon *tcon) +{ + /* + * For SMB1, see MS-CIFS 2.4.55 SMB_COM_TREE_CONNECT_ANDX (0x75) and MS-CIFS 3.3.4.4 DFS + * Subsystem Notifies That a Share Is a DFS Share. + * + * For SMB2+, see MS-SMB2 2.2.10 SMB2 TREE_CONNECT Response and MS-SMB2 3.3.4.14 Server + * Application Updates a Share. + */ + if (!tcon || !tcon->ses || !tcon->ses->server) + return false; + return is_smb1_server(tcon->ses->server) ? 
tcon->Flags & SMB_SHARE_IS_IN_DFS : + tcon->share_flags & (SHI1005_FLAGS_DFS | SHI1005_FLAGS_DFS_ROOT); +} + #endif /* _CIFS_GLOB_H */ diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 0e763d2dcf16648d6465febd8b5f079fd6486e2a..0496934feecb777b6a7ae53a00003278f23517ea 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -581,7 +581,7 @@ should_set_ext_sec_flag(enum securityEnum sectype) if (global_secflags & (CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)) return true; - /* Fallthrough */ + fallthrough; default: return false; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a275ee399dcea070fb898c21e5c1e94cda476783..a5731dd6e6566e7f86bac2db176f3c640a4d266f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1378,25 +1378,25 @@ static int cifs_parse_security_flavors(char *value, return 1; case Opt_sec_krb5i: vol->sign = true; - /* Fallthrough */ + fallthrough; case Opt_sec_krb5: vol->sectype = Kerberos; break; case Opt_sec_ntlmsspi: vol->sign = true; - /* Fallthrough */ + fallthrough; case Opt_sec_ntlmssp: vol->sectype = RawNTLMSSP; break; case Opt_sec_ntlmi: vol->sign = true; - /* Fallthrough */ + fallthrough; case Opt_ntlm: vol->sectype = NTLM; break; case Opt_sec_ntlmv2i: vol->sign = true; - /* Fallthrough */ + fallthrough; case Opt_sec_ntlmv2: vol->sectype = NTLMv2; break; @@ -2187,7 +2187,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, vol->password = NULL; break; } - /* Fallthrough - to Opt_pass below.*/ + fallthrough; /* to Opt_pass below */ case Opt_pass: /* Obtain the value string */ value = strchr(data, '='); @@ -4909,7 +4909,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol) if (!tcon) continue; /* Make sure that requests go through new root servers */ - if (tcon->share_flags & (SHI1005_FLAGS_DFS | SHI1005_FLAGS_DFS_ROOT)) { + if (is_tcon_dfs(tcon)) { put_root_ses(root_ses); set_root_ses(cifs_sb, ses, &root_ses); } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 3989d08396acb077512888e4954fea8964b3d7b5..1f75b25e559a7594739356ae6474d69b47a89d40 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1017,6 +1017,8 @@ cifs_get_inode_info(struct inode **inode, if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) { rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, true, full_path, fid); + if (rc == -EREMOTE) + rc = 0; if (rc) { cifs_dbg(FYI, "%s: Get mode from SID failed. 
rc=%d\n", __func__, rc); @@ -1025,6 +1027,8 @@ cifs_get_inode_info(struct inode **inode, } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, false, full_path, fid); + if (rc == -EREMOTE) + rc = 0; if (rc) { cifs_dbg(FYI, "%s: Getting ACL failed with error: %d\n", __func__, rc); diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 69cd5856621b8af2455f58f07b434ce1f9cee028..de564368a887c23d4de9c6c84ad8de272482d65c 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -798,7 +798,7 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) if ((server->sec_kerberos || server->sec_mskerberos) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; - /* Fallthrough */ + fallthrough; default: return Unspecified; } @@ -815,7 +815,7 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) default: break; } - /* Fallthrough - to attempt LANMAN authentication next */ + fallthrough; /* to attempt LANMAN authentication next */ case CIFS_NEGFLAVOR_LANMAN: switch (requested) { case LANMAN: @@ -823,7 +823,7 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) case Unspecified: if (global_secflags & CIFSSEC_MAY_LANMAN) return LANMAN; - /* Fallthrough */ + fallthrough; default: return Unspecified; } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 667d70aa335fccfd0cc038bd9d3cafc7635f3afa..96c172d94fba4046f96b6183f82a7f04af2a0bd1 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1101,7 +1101,7 @@ smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) if ((server->sec_kerberos || server->sec_mskerberos) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; - /* Fallthrough */ + fallthrough; default: return Unspecified; } diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index cb733652ecca6cd0679b57acf3db93efdd4751d4..ca22737272258904575e78b6d9e6fb44816021a1 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1688,11 +1688,11 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence) switch (whence) { case 1: offset += file->f_pos; - /* fall through */ + fallthrough; case 0: if (offset >= 0) break; - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/fs/dax.c b/fs/dax.c index 95341af1a9660dc23aee9e141712c3bf4d1620c7..994ab66a99071356353365825376c35ae1d4f990 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1367,7 +1367,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, ret = dax_load_hole(&xas, mapping, &entry, vmf); goto finish_iomap; } - /*FALLTHRU*/ + fallthrough; default: WARN_ON_ONCE(1); error = -EIO; diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index b167d2d0214849a98fd6fe457eb9583fa33bf23c..a768a09430c3a9c0cad71e2f47a8544acafdf9c0 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -177,7 +177,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp) goto out; if (!fops_get(real_fops)) { -#ifdef MODULE +#ifdef CONFIG_MODULES if (real_fops->owner && real_fops->owner->state == MODULE_STATE_GOING) goto out; @@ -312,7 +312,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp) goto out; if (!fops_get(real_fops)) { -#ifdef MODULE +#ifdef CONFIG_MODULES if (real_fops->owner && real_fops->owner->state == MODULE_STATE_GOING) goto out; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 18d81599522f34e5a78011a8326e1ad7614e3254..002123efc6b052346ee8c7093789fbbb08929bd1 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ 
-5817,7 +5817,7 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, break; case -EAGAIN: error = 0; - /* fall through */ + fallthrough; default: __put_lkb(ls, lkb); goto out; diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 7d40d78ea8648ecdd11f8ee85f899232c2c5d4ab..ae325541884e3b44332bba1cdf1d357e55e510f6 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -359,7 +359,7 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m, return z_erofs_extent_lookback(m, m->delta[0]); case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: map->m_flags &= ~EROFS_MAP_ZIPPED; - /* fallthrough */ + fallthrough; case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: map->m_la = (lcn << lclusterbits) | m->clusterofs; break; @@ -416,7 +416,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: if (endoff >= m.clusterofs) map->m_flags &= ~EROFS_MAP_ZIPPED; - /* fallthrough */ + fallthrough; case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: if (endoff >= m.clusterofs) { map->m_la = (m.lcn << lclusterbits) | m.clusterofs; @@ -433,7 +433,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, end = (m.lcn << lclusterbits) | m.clusterofs; map->m_flags |= EROFS_MAP_FULL_MAPPED; m.delta[0] = 1; - /* fallthrough */ + fallthrough; case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: /* get the correspoinding first chunk */ err = z_erofs_extent_lookback(&m, m.delta[0]); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 12eebcdea9c8a2a39c9dbf079dd41d8e67648fec..8107e06d7f6f56873914c99e6b32cefcd541dc1f 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1994,9 +1994,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) * not already there, and calling reverse_path_check() * during ep_insert(). */ - if (list_empty(&epi->ffd.file->f_tfile_llink)) - list_add(&epi->ffd.file->f_tfile_llink, - &tfile_check_list); + if (list_empty(&epi->ffd.file->f_tfile_llink)) { + if (get_file_rcu(epi->ffd.file)) + list_add(&epi->ffd.file->f_tfile_llink, + &tfile_check_list); + } } } mutex_unlock(&ep->mtx); @@ -2040,6 +2042,7 @@ static void clear_tfile_check_list(void) file = list_first_entry(&tfile_check_list, struct file, f_tfile_llink); list_del_init(&file->f_tfile_llink); + fput(file); } INIT_LIST_HEAD(&tfile_check_list); } @@ -2200,25 +2203,22 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds, full_check = 1; if (is_file_epoll(tf.file)) { error = -ELOOP; - if (ep_loop_check(ep, tf.file) != 0) { - clear_tfile_check_list(); + if (ep_loop_check(ep, tf.file) != 0) goto error_tgt_fput; - } - } else + } else { + get_file(tf.file); list_add(&tf.file->f_tfile_llink, &tfile_check_list); + } error = epoll_mutex_lock(&ep->mtx, 0, nonblock); - if (error) { -out_del: - list_del(&tf.file->f_tfile_llink); + if (error) goto error_tgt_fput; - } if (is_file_epoll(tf.file)) { tep = tf.file->private_data; error = epoll_mutex_lock(&tep->mtx, 1, nonblock); if (error) { mutex_unlock(&ep->mtx); - goto out_del; + goto error_tgt_fput; } } } @@ -2239,8 +2239,6 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds, error = ep_insert(ep, epds, tf.file, fd, full_check); } else error = -EEXIST; - if (full_check) - clear_tfile_check_list(); break; case EPOLL_CTL_DEL: if (epi) @@ -2263,8 +2261,10 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds, mutex_unlock(&ep->mtx); error_tgt_fput: - if (full_check) + if (full_check) { + clear_tfile_check_list(); mutex_unlock(&epmutex); + } fdput(tf); error_fput: diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 
60378ddf1424b019485ae77940bb08abff39b849..96044f5dbc0e0232386e9bd489dc20ae6ecdf066 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -93,8 +93,10 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) struct inode *inode = file_inode(vmf->vma->vm_file); struct ext2_inode_info *ei = EXT2_I(inode); vm_fault_t ret; + bool write = (vmf->flags & FAULT_FLAG_WRITE) && + (vmf->vma->vm_flags & VM_SHARED); - if (vmf->flags & FAULT_FLAG_WRITE) { + if (write) { sb_start_pagefault(inode->i_sb); file_update_time(vmf->vma->vm_file); } @@ -103,7 +105,7 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops); up_read(&ei->dax_sem); - if (vmf->flags & FAULT_FLAG_WRITE) + if (write) sb_end_pagefault(inode->i_sb); return ret; } diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 80662e1f7889f532bb061479d87d4174712fbc63..415c21f0e75088c61dd9614bf73cf5b2b3859893 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1241,7 +1241,7 @@ static void __ext2_truncate_blocks(struct inode *inode, loff_t offset) mark_inode_dirty(inode); ext2_free_branches(inode, &nr, &nr+1, 1); } - /* fall through */ + fallthrough; case EXT2_IND_BLOCK: nr = i_data[EXT2_DIND_BLOCK]; if (nr) { @@ -1249,7 +1249,7 @@ static void __ext2_truncate_blocks(struct inode *inode, loff_t offset) mark_inode_dirty(inode); ext2_free_branches(inode, &nr, &nr+1, 2); } - /* fall through */ + fallthrough; case EXT2_DIND_BLOCK: nr = i_data[EXT2_TIND_BLOCK]; if (nr) { diff --git a/fs/ext2/super.c b/fs/ext2/super.c index dda860562ca346683d88b91e1be889dff6eef9e7..7fab2b3b5b3980964b2f4b5fd186535c6a7692d3 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -587,7 +587,7 @@ static int parse_options(char *options, struct super_block *sb, case Opt_xip: ext2_msg(sb, KERN_INFO, "use dax instead of xip"); set_opt(opts->s_mount_opt, XIP); - /* Fall through */ + fallthrough; case Opt_dax: #ifdef CONFIG_FS_DAX ext2_msg(sb, KERN_WARNING, diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 1afa5a4bcb5f87d9ae6768955025563dad45a2de..619dd35ddd48a973c6fc834731cf87634f72c3d2 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -110,7 +110,7 @@ config EXT4_KUNIT_TESTS This builds the ext4 KUnit tests. KUnit tests run during boot and output the results to the debug log - in TAP format (http://testanything.org/). Only useful for kernel devs + in TAP format (https://testanything.org/). Only useful for kernel devs running KUnit test harness and are not for inclusion into a production build. diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 1ba46d87cdf103a81146358b84765bfa4f6fc311..48c3df47748dbefc74669fb666356d57a6b61278 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -413,7 +413,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb, * Return buffer_head on success or an ERR_PTR in case of failure. 
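/*
 * Not part of the patch: a sketch of how the two modes of
 * ext4_read_block_bitmap_nowait() introduced just below are meant to
 * be driven. A normal reader passes ignore_locked=false and then
 * waits for the read; a prefetcher passes ignore_locked=true and
 * treats a NULL return ("buffer already under I/O") as nothing to do.
 * Error handling is trimmed and the helper names are made up.
 */
static struct buffer_head *read_bitmap_blocking(struct super_block *sb,
						ext4_group_t group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, group, false);
	if (IS_ERR(bh))
		return bh;
	if (ext4_wait_block_bitmap(sb, group, bh)) {
		put_bh(bh);
		return ERR_PTR(-EIO);
	}
	return bh;
}

static void read_bitmap_prefetch(struct super_block *sb, ext4_group_t group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, group, true);
	if (bh && !IS_ERR(bh))
		brelse(bh);	/* read is in flight; drop our reference */
}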
*/ struct buffer_head * -ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) +ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group, + bool ignore_locked) { struct ext4_group_desc *desc; struct ext4_sb_info *sbi = EXT4_SB(sb); @@ -441,6 +442,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) return ERR_PTR(-ENOMEM); } + if (ignore_locked && buffer_locked(bh)) { + /* buffer under IO already, return if called for prefetching */ + put_bh(bh); + return NULL; + } + if (bitmap_uptodate(bh)) goto verify; @@ -487,10 +494,11 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) * submit the buffer_head for reading */ set_buffer_new(bh); - trace_ext4_read_block_bitmap_load(sb, block_group); + trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked); bh->b_end_io = ext4_end_bitmap_read; get_bh(bh); - submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); + submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO | + (ignore_locked ? REQ_RAHEAD : 0), bh); return bh; verify: err = ext4_validate_block_bitmap(sb, desc, block_group, bh); @@ -534,7 +542,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) struct buffer_head *bh; int err; - bh = ext4_read_block_bitmap_nowait(sb, block_group); + bh = ext4_read_block_bitmap_nowait(sb, block_group, false); if (IS_ERR(bh)) return bh; err = ext4_wait_block_bitmap(sb, block_group, bh); diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index 16e9b2fda03ae809958527768cff94779e682692..c54ba52f2dd4df3deb7b3cd647e4f3f0d7b8ea19 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -24,6 +24,7 @@ struct ext4_system_zone { struct rb_node node; ext4_fsblk_t start_blk; unsigned int count; + u32 ino; }; static struct kmem_cache *ext4_system_zone_cachep; @@ -45,7 +46,8 @@ void ext4_exit_system_zone(void) static inline int can_merge(struct ext4_system_zone *entry1, struct ext4_system_zone *entry2) { - if ((entry1->start_blk + entry1->count) == entry2->start_blk) + if ((entry1->start_blk + entry1->count) == entry2->start_blk && + entry1->ino == entry2->ino) return 1; return 0; } @@ -66,9 +68,9 @@ static void release_system_zone(struct ext4_system_blocks *system_blks) */ static int add_system_zone(struct ext4_system_blocks *system_blks, ext4_fsblk_t start_blk, - unsigned int count) + unsigned int count, u32 ino) { - struct ext4_system_zone *new_entry = NULL, *entry; + struct ext4_system_zone *new_entry, *entry; struct rb_node **n = &system_blks->root.rb_node, *node; struct rb_node *parent = NULL, *new_node = NULL; @@ -79,30 +81,21 @@ static int add_system_zone(struct ext4_system_blocks *system_blks, n = &(*n)->rb_left; else if (start_blk >= (entry->start_blk + entry->count)) n = &(*n)->rb_right; - else { - if (start_blk + count > (entry->start_blk + - entry->count)) - entry->count = (start_blk + count - - entry->start_blk); - new_node = *n; - new_entry = rb_entry(new_node, struct ext4_system_zone, - node); - break; - } + else /* Unexpected overlap of system zones. 
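/*
 * Illustration only (plain C, flat array instead of the kernel's
 * rbtree): with the ino tag added to struct ext4_system_zone above, a
 * block range that overlaps a registered zone is rejected unless the
 * zone was recorded for the same inode -- the same
 * "entry->ino == inode->i_ino" decision that ext4_inode_block_valid()
 * makes further down. The range-limit checks against the superblock
 * are omitted here.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct zone { uint64_t start, count; uint32_t ino; };

static bool range_valid(const struct zone *z, size_t nzones,
			uint64_t start, uint64_t count, uint32_t ino)
{
	for (size_t i = 0; i < nzones; i++) {
		bool overlap = start < z[i].start + z[i].count &&
			       z[i].start < start + count;

		if (overlap && z[i].ino != ino)
			return false;
	}
	return true;
}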
*/ + return -EFSCORRUPTED; } - if (!new_entry) { - new_entry = kmem_cache_alloc(ext4_system_zone_cachep, - GFP_KERNEL); - if (!new_entry) - return -ENOMEM; - new_entry->start_blk = start_blk; - new_entry->count = count; - new_node = &new_entry->node; - - rb_link_node(new_node, parent, n); - rb_insert_color(new_node, &system_blks->root); - } + new_entry = kmem_cache_alloc(ext4_system_zone_cachep, + GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + new_entry->start_blk = start_blk; + new_entry->count = count; + new_entry->ino = ino; + new_node = &new_entry->node; + + rb_link_node(new_node, parent, n); + rb_insert_color(new_node, &system_blks->root); /* Can we merge to the left? */ node = rb_prev(new_node); @@ -151,40 +144,6 @@ static void debug_print_tree(struct ext4_sb_info *sbi) printk(KERN_CONT "\n"); } -/* - * Returns 1 if the passed-in block region (start_blk, - * start_blk+count) is valid; 0 if some part of the block region - * overlaps with filesystem metadata blocks. - */ -static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi, - struct ext4_system_blocks *system_blks, - ext4_fsblk_t start_blk, - unsigned int count) -{ - struct ext4_system_zone *entry; - struct rb_node *n; - - if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || - (start_blk + count < start_blk) || - (start_blk + count > ext4_blocks_count(sbi->s_es))) - return 0; - - if (system_blks == NULL) - return 1; - - n = system_blks->root.rb_node; - while (n) { - entry = rb_entry(n, struct ext4_system_zone, node); - if (start_blk + count - 1 < entry->start_blk) - n = n->rb_left; - else if (start_blk >= (entry->start_blk + entry->count)) - n = n->rb_right; - else - return 0; - } - return 1; -} - static int ext4_protect_reserved_inode(struct super_block *sb, struct ext4_system_blocks *system_blks, u32 ino) @@ -214,19 +173,18 @@ static int ext4_protect_reserved_inode(struct super_block *sb, if (n == 0) { i++; } else { - if (!ext4_data_block_valid_rcu(sbi, system_blks, - map.m_pblk, n)) { - err = -EFSCORRUPTED; - __ext4_error(sb, __func__, __LINE__, -err, - map.m_pblk, "blocks %llu-%llu " - "from inode %u overlap system zone", - map.m_pblk, - map.m_pblk + map.m_len - 1, ino); + err = add_system_zone(system_blks, map.m_pblk, n, ino); + if (err < 0) { + if (err == -EFSCORRUPTED) { + __ext4_error(sb, __func__, __LINE__, + -err, map.m_pblk, + "blocks %llu-%llu from inode %u overlap system zone", + map.m_pblk, + map.m_pblk + map.m_len - 1, + ino); + } break; } - err = add_system_zone(system_blks, map.m_pblk, n); - if (err < 0) - break; i += n; } } @@ -262,14 +220,6 @@ int ext4_setup_system_zone(struct super_block *sb) int flex_size = ext4_flex_bg_size(sbi); int ret; - if (!test_opt(sb, BLOCK_VALIDITY)) { - if (sbi->system_blks) - ext4_release_system_zone(sb); - return 0; - } - if (sbi->system_blks) - return 0; - system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL); if (!system_blks) return -ENOMEM; @@ -277,22 +227,25 @@ int ext4_setup_system_zone(struct super_block *sb) for (i=0; i < ngroups; i++) { cond_resched(); if (ext4_bg_has_super(sb, i) && - ((i < 5) || ((i % flex_size) == 0))) - add_system_zone(system_blks, + ((i < 5) || ((i % flex_size) == 0))) { + ret = add_system_zone(system_blks, ext4_group_first_block_no(sb, i), - ext4_bg_num_gdb(sb, i) + 1); + ext4_bg_num_gdb(sb, i) + 1, 0); + if (ret) + goto err; + } gdp = ext4_get_group_desc(sb, i, NULL); ret = add_system_zone(system_blks, - ext4_block_bitmap(sb, gdp), 1); + ext4_block_bitmap(sb, gdp), 1, 0); if (ret) goto err; ret = add_system_zone(system_blks, - 
ext4_inode_bitmap(sb, gdp), 1); + ext4_inode_bitmap(sb, gdp), 1, 0); if (ret) goto err; ret = add_system_zone(system_blks, ext4_inode_table(sb, gdp), - sbi->s_itb_per_group); + sbi->s_itb_per_group, 0); if (ret) goto err; } @@ -341,11 +294,24 @@ void ext4_release_system_zone(struct super_block *sb) call_rcu(&system_blks->rcu, ext4_destroy_system_zone); } -int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk, +/* + * Returns 1 if the passed-in block region (start_blk, + * start_blk+count) is valid; 0 if some part of the block region + * overlaps with some other filesystem metadata blocks. + */ +int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk, unsigned int count) { + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_system_blocks *system_blks; - int ret; + struct ext4_system_zone *entry; + struct rb_node *n; + int ret = 1; + + if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || + (start_blk + count < start_blk) || + (start_blk + count > ext4_blocks_count(sbi->s_es))) + return 0; /* * Lock the system zone to prevent it being released concurrently @@ -354,8 +320,22 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk, */ rcu_read_lock(); system_blks = rcu_dereference(sbi->system_blks); - ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk, - count); + if (system_blks == NULL) + goto out_rcu; + + n = system_blks->root.rb_node; + while (n) { + entry = rb_entry(n, struct ext4_system_zone, node); + if (start_blk + count - 1 < entry->start_blk) + n = n->rb_left; + else if (start_blk >= (entry->start_blk + entry->count)) + n = n->rb_right; + else { + ret = (entry->ino == inode->i_ino); + break; + } + } +out_rcu: rcu_read_unlock(); return ret; } @@ -374,8 +354,7 @@ int ext4_check_blockref(const char *function, unsigned int line, while (bref < p+max) { blk = le32_to_cpu(*bref++); if (blk && - unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), - blk, 1))) { + unlikely(!ext4_inode_block_valid(inode, blk, 1))) { ext4_error_inode(inode, function, line, blk, "invalid block"); return -EFSCORRUPTED; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 42f5060f3cdf19748acdbf076fb08f0170cbac72..523e00d7b39246ba8b0fb70dbcbf7a89f37e2bf1 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -434,10 +434,36 @@ struct flex_groups { #define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded directory */ #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ -#define EXT4_FL_USER_VISIBLE 0x725BDFFF /* User visible flags */ -#define EXT4_FL_USER_MODIFIABLE 0x624BC0FF /* User modifiable flags */ - -/* Flags we can manipulate with through EXT4_IOC_FSSETXATTR */ +/* User modifiable flags */ +#define EXT4_FL_USER_MODIFIABLE (EXT4_SECRM_FL | \ + EXT4_UNRM_FL | \ + EXT4_COMPR_FL | \ + EXT4_SYNC_FL | \ + EXT4_IMMUTABLE_FL | \ + EXT4_APPEND_FL | \ + EXT4_NODUMP_FL | \ + EXT4_NOATIME_FL | \ + EXT4_JOURNAL_DATA_FL | \ + EXT4_NOTAIL_FL | \ + EXT4_DIRSYNC_FL | \ + EXT4_TOPDIR_FL | \ + EXT4_EXTENTS_FL | \ + 0x00400000 /* EXT4_EOFBLOCKS_FL */ | \ + EXT4_DAX_FL | \ + EXT4_PROJINHERIT_FL | \ + EXT4_CASEFOLD_FL) + +/* User visible flags */ +#define EXT4_FL_USER_VISIBLE (EXT4_FL_USER_MODIFIABLE | \ + EXT4_DIRTY_FL | \ + EXT4_COMPRBLK_FL | \ + EXT4_NOCOMPR_FL | \ + EXT4_ENCRYPT_FL | \ + EXT4_INDEX_FL | \ + EXT4_VERITY_FL | \ + EXT4_INLINE_DATA_FL) + +/* Flags we can manipulate with through FS_IOC_FSSETXATTR */ #define EXT4_FL_XFLAG_VISIBLE (EXT4_SYNC_FL | \ EXT4_IMMUTABLE_FL | \ EXT4_APPEND_FL | \ @@ -669,8 +695,6 @@ enum { /* * ioctl 
commands */ -#define EXT4_IOC_GETFLAGS FS_IOC_GETFLAGS -#define EXT4_IOC_SETFLAGS FS_IOC_SETFLAGS #define EXT4_IOC_GETVERSION _IOR('f', 3, long) #define EXT4_IOC_SETVERSION _IOW('f', 4, long) #define EXT4_IOC_GETVERSION_OLD FS_IOC_GETVERSION @@ -687,17 +711,11 @@ enum { #define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64) #define EXT4_IOC_SWAP_BOOT _IO('f', 17) #define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18) -#define EXT4_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY -#define EXT4_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT -#define EXT4_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY /* ioctl codes 19--39 are reserved for fscrypt */ #define EXT4_IOC_CLEAR_ES_CACHE _IO('f', 40) #define EXT4_IOC_GETSTATE _IOW('f', 41, __u32) #define EXT4_IOC_GET_ES_CACHE _IOWR('f', 42, struct fiemap) -#define EXT4_IOC_FSGETXATTR FS_IOC_FSGETXATTR -#define EXT4_IOC_FSSETXATTR FS_IOC_FSSETXATTR - #define EXT4_IOC_SHUTDOWN _IOR ('X', 125, __u32) /* @@ -722,8 +740,6 @@ enum { /* * ioctl commands in 32 bit emulation */ -#define EXT4_IOC32_GETFLAGS FS_IOC32_GETFLAGS -#define EXT4_IOC32_SETFLAGS FS_IOC32_SETFLAGS #define EXT4_IOC32_GETVERSION _IOR('f', 3, int) #define EXT4_IOC32_SETVERSION _IOW('f', 4, int) #define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int) @@ -1054,6 +1070,7 @@ struct ext4_inode_info { struct timespec64 i_crtime; /* mballoc */ + atomic_t i_prealloc_active; struct list_head i_prealloc_list; spinlock_t i_prealloc_lock; @@ -1172,6 +1189,7 @@ struct ext4_inode_info { #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ #define EXT4_MOUNT_WARN_ON_ERROR 0x2000000 /* Trigger WARN_ON on error */ +#define EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS 0x4000000 #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ @@ -1501,10 +1519,13 @@ struct ext4_sb_info { unsigned int s_mb_stats; unsigned int s_mb_order2_reqs; unsigned int s_mb_group_prealloc; + unsigned int s_mb_max_inode_prealloc; unsigned int s_max_dir_size_kb; /* where last allocation was done - for stream allocation */ unsigned long s_mb_last_group; unsigned long s_mb_last_start; + unsigned int s_mb_prefetch; + unsigned int s_mb_prefetch_limit; /* stats for buddy allocator */ atomic_t s_bal_reqs; /* number of reqs with len > 1 */ @@ -1572,6 +1593,8 @@ struct ext4_sb_info { struct ratelimit_state s_err_ratelimit_state; struct ratelimit_state s_warning_ratelimit_state; struct ratelimit_state s_msg_ratelimit_state; + atomic_t s_warning_count; + atomic_t s_msg_count; /* Encryption context for '-o test_dummy_encryption' */ struct fscrypt_dummy_context s_dummy_enc_ctx; @@ -1585,6 +1608,9 @@ struct ext4_sb_info { #ifdef CONFIG_EXT4_DEBUG unsigned long s_simulate_fail; #endif + /* Record the errseq of the backing block device */ + errseq_t s_bdev_wb_err; + spinlock_t s_bdev_wb_lock; }; static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) @@ -2313,9 +2339,15 @@ struct ext4_lazy_init { struct mutex li_list_mtx; }; +enum ext4_li_mode { + EXT4_LI_MODE_PREFETCH_BBITMAP, + EXT4_LI_MODE_ITABLE, +}; + struct ext4_li_request { struct super_block *lr_super; - struct ext4_sb_info *lr_sbi; + enum ext4_li_mode lr_mode; + ext4_group_t lr_first_not_zeroed; ext4_group_t lr_next_group; struct list_head lr_request; unsigned long lr_next_sched; @@ -2446,7 +2478,8 @@ extern struct ext4_group_desc * 
ext4_get_group_desc(struct super_block * sb, extern int ext4_should_retry_alloc(struct super_block *sb, int *retries); extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb, - ext4_group_t block_group); + ext4_group_t block_group, + bool ignore_locked); extern int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group, struct buffer_head *bh); @@ -2651,9 +2684,15 @@ extern int ext4_mb_release(struct super_block *); extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *, struct ext4_allocation_request *, int *); extern int ext4_mb_reserve_blocks(struct super_block *, int); -extern void ext4_discard_preallocations(struct inode *); +extern void ext4_discard_preallocations(struct inode *, unsigned int); extern int __init ext4_init_mballoc(void); extern void ext4_exit_mballoc(void); +extern ext4_group_t ext4_mb_prefetch(struct super_block *sb, + ext4_group_t group, + unsigned int nr, int *cnt); +extern void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, + unsigned int nr); + extern void ext4_free_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t block, unsigned long count, int flags); @@ -2765,8 +2804,7 @@ extern int ext4_search_dir(struct buffer_head *bh, struct ext4_filename *fname, unsigned int offset, struct ext4_dir_entry_2 **res_dir); -extern int ext4_generic_delete_entry(handle_t *handle, - struct inode *dir, +extern int ext4_generic_delete_entry(struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh, void *entry_buf, @@ -2924,12 +2962,6 @@ do { \ #endif -extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb, - __u32 compat); -extern int ext4_update_rocompat_feature(handle_t *handle, - struct super_block *sb, __u32 rocompat); -extern int ext4_update_incompat_feature(handle_t *handle, - struct super_block *sb, __u32 incompat); extern ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, struct ext4_group_desc *bg); extern ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb, @@ -3145,6 +3177,7 @@ struct ext4_group_info { (1 << EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT) #define EXT4_GROUP_INFO_IBITMAP_CORRUPT \ (1 << EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT) +#define EXT4_GROUP_INFO_BBITMAP_READ_BIT 4 #define EXT4_MB_GRP_NEED_INIT(grp) \ (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state))) @@ -3159,6 +3192,8 @@ struct ext4_group_info { (set_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state))) #define EXT4_MB_GRP_CLEAR_TRIMMED(grp) \ (clear_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state))) +#define EXT4_MB_GRP_TEST_AND_SET_READ(grp) \ + (test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_READ_BIT, &((grp)->bb_state))) #define EXT4_MAX_CONTENTION 8 #define EXT4_CONTENTION_THRESHOLD 2 @@ -3363,9 +3398,9 @@ extern void ext4_release_system_zone(struct super_block *sb); extern int ext4_setup_system_zone(struct super_block *sb); extern int __init ext4_init_system_zone(void); extern void ext4_exit_system_zone(void); -extern int ext4_data_block_valid(struct ext4_sb_info *sbi, - ext4_fsblk_t start_blk, - unsigned int count); +extern int ext4_inode_block_valid(struct inode *inode, + ext4_fsblk_t start_blk, + unsigned int count); extern int ext4_check_blockref(const char *, unsigned int, struct inode *, __le32 *, unsigned int); diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 0c76cdd44d90d02316114172da2ea8bd5120e6a9..760b9ee49dc004dec695a1db564d2acba776ad21 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -195,6 +195,28 @@ static void 
ext4_journal_abort_handle(const char *caller, unsigned int line, jbd2_journal_abort_handle(handle); } +static void ext4_check_bdev_write_error(struct super_block *sb) +{ + struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; + struct ext4_sb_info *sbi = EXT4_SB(sb); + int err; + + /* + * If the block device has write error flag, it may have failed to + * async write out metadata buffers in the background. In this case, + * we could read old data from disk and write it out again, which + * may lead to on-disk filesystem inconsistency. + */ + if (errseq_check(&mapping->wb_err, READ_ONCE(sbi->s_bdev_wb_err))) { + spin_lock(&sbi->s_bdev_wb_lock); + err = errseq_check_and_advance(&mapping->wb_err, &sbi->s_bdev_wb_err); + spin_unlock(&sbi->s_bdev_wb_lock); + if (err) + ext4_error_err(sb, -err, + "Error while async write back metadata"); + } +} + int __ext4_journal_get_write_access(const char *where, unsigned int line, handle_t *handle, struct buffer_head *bh) { @@ -202,6 +224,9 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line, might_sleep(); + if (bh->b_bdev->bd_super) + ext4_check_bdev_write_error(bh->b_bdev->bd_super); + if (ext4_handle_valid(handle)) { err = jbd2_journal_get_write_access(handle, bh); if (err) diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 221f240eae60475f5dd2503ab0542f4ab9c82e5c..a0481582187a35c42997caf4aa7ad9a4380fa8f6 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -100,7 +100,7 @@ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped) * i_mutex. So we can safely drop the i_data_sem here. */ BUG_ON(EXT4_JOURNAL(inode) == NULL); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); up_write(&EXT4_I(inode)->i_data_sem); *dropped = 1; return 0; @@ -340,7 +340,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) */ if (lblock + len <= lblock) return 0; - return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); + return ext4_inode_block_valid(inode, block, len); } static int ext4_valid_extent_idx(struct inode *inode, @@ -348,7 +348,7 @@ static int ext4_valid_extent_idx(struct inode *inode, { ext4_fsblk_t block = ext4_idx_pblock(ext_idx); - return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); + return ext4_inode_block_valid(inode, block, 1); } static int ext4_valid_extent_entries(struct inode *inode, @@ -507,14 +507,10 @@ __read_extent_tree_block(const char *function, unsigned int line, } if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE)) return bh; - if (!ext4_has_feature_journal(inode->i_sb) || - (inode->i_ino != - le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) { - err = __ext4_ext_check(function, line, inode, - ext_block_hdr(bh), depth, pblk); - if (err) - goto errout; - } + err = __ext4_ext_check(function, line, inode, + ext_block_hdr(bh), depth, pblk); + if (err) + goto errout; set_buffer_verified(bh); /* * If this is a leaf block, cache all of its entries @@ -693,10 +689,8 @@ void ext4_ext_drop_refs(struct ext4_ext_path *path) return; depth = path->p_depth; for (i = 0; i <= depth; i++, path++) { - if (path->p_bh) { - brelse(path->p_bh); - path->p_bh = NULL; - } + brelse(path->p_bh); + path->p_bh = NULL; } } @@ -1915,7 +1909,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, /* * ext4_ext_insert_extent: - * tries to merge requsted extent into the existing extent or + * tries to merge requested extent into the existing extent or * inserts requested extent as new one into the tree, * creating 
new leaf in the no-space case. */ @@ -3125,7 +3119,7 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) * * * Splits extent [a, b] into two extents [a, @split) and [@split, b], states - * of which are deterimined by split_flag. + * of which are determined by split_flag. * * There are two cases: * a> the extent are splitted into two extent. @@ -3650,7 +3644,7 @@ static int ext4_split_convert_extents(handle_t *handle, eof_block = map->m_lblk + map->m_len; /* * It is safe to convert extent to initialized via explicit - * zeroout only if extent is fully insde i_size or new_size. + * zeroout only if extent is fully inside i_size or new_size. */ depth = ext_depth(inode); ex = path[depth].p_ext; @@ -4272,7 +4266,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, * not a good idea to call discard here directly, * but otherwise we'd need to call it every free(). */ - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE; ext4_free_blocks(handle, inode, NULL, newblock, @@ -4495,7 +4489,7 @@ static long ext4_zero_range(struct file *file, loff_t offset, } /* - * Round up offset. This is not fallocate, we neet to zero out + * Round up offset. This is not fallocate, we need to zero out * blocks, so convert interior block aligned part of the range to * unwritten and possibly manually zero out unaligned parts of the * range. @@ -5299,7 +5293,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) } down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ret = ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start); @@ -5313,7 +5307,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ret = ext4_ext_shift_extents(inode, handle, punch_stop, punch_stop - punch_start, SHIFT_LEFT); @@ -5445,7 +5439,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); path = ext4_find_extent(inode, offset_lblk, NULL, 0); if (IS_ERR(path)) { @@ -5579,7 +5573,7 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1, } ex1 = path1[path1->p_depth].p_ext; ex2 = path2[path2->p_depth].p_ext; - /* Do we have somthing to swap ? */ + /* Do we have something to swap ? 
*/ if (unlikely(!ex2 || !ex1)) goto finish; diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 129cc1dd6b79520d9510f614f16a45906633d547..7d61069531d313b53a314db86d229d09c88cde29 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -145,10 +145,9 @@ static int ext4_release_file(struct inode *inode, struct file *filp) /* if we are the last writer on the inode, drop the block reservation */ if ((filp->f_mode & FMODE_WRITE) && (atomic_read(&inode->i_writecount) == 1) && - !EXT4_I(inode)->i_reserved_data_blocks) - { + !EXT4_I(inode)->i_reserved_data_blocks) { down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); up_write(&EXT4_I(inode)->i_data_sem); } if (is_dx(inode) && filp->private_data) @@ -428,6 +427,10 @@ static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from, */ if (*ilock_shared && (!IS_NOSEC(inode) || *extend || !ext4_overwrite_io(inode, offset, count))) { + if (iocb->ki_flags & IOCB_NOWAIT) { + ret = -EAGAIN; + goto out; + } inode_unlock_shared(inode); *ilock_shared = false; inode_lock(inode); @@ -812,7 +815,7 @@ static int ext4_sample_last_mounted(struct super_block *sb, return err; } -static int ext4_file_open(struct inode * inode, struct file * filp) +static int ext4_file_open(struct inode *inode, struct file *filp) { int ret; diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c index 3e133793a5a3466077d84e807b9dd4c729b51cec..2924261226e0fa6dcc83f849125e0a879b0f869f 100644 --- a/fs/ext4/hash.c +++ b/fs/ext4/hash.c @@ -233,7 +233,7 @@ static int __ext4fs_dirhash(const char *name, int len, break; case DX_HASH_HALF_MD4_UNSIGNED: str2hashbuf = str2hashbuf_unsigned; - /* fall through */ + fallthrough; case DX_HASH_HALF_MD4: p = name; while (len > 0) { @@ -247,7 +247,7 @@ static int __ext4fs_dirhash(const char *name, int len, break; case DX_HASH_TEA_UNSIGNED: str2hashbuf = str2hashbuf_unsigned; - /* fall through */ + fallthrough; case DX_HASH_TEA: p = name; while (len > 0) { diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index be2b66eb65f7a398329e292454de667befb417f1..80c9f33800bea2c3911158889edb929f3c86da4c 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -696,7 +696,7 @@ static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode, * i_mutex. So we can safely drop the i_data_sem here. 
*/ BUG_ON(EXT4_JOURNAL(inode) == NULL); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); up_write(&EXT4_I(inode)->i_data_sem); *dropped = 1; return 0; @@ -858,8 +858,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode, else if (ext4_should_journal_data(inode)) flags |= EXT4_FREE_BLOCKS_FORGET; - if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, - count)) { + if (!ext4_inode_block_valid(inode, block_to_free, count)) { EXT4_ERROR_INODE(inode, "attempt to clear invalid " "blocks %llu len %lu", (unsigned long long) block_to_free, count); @@ -1004,8 +1003,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, if (!nr) continue; /* A hole */ - if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), - nr, 1)) { + if (!ext4_inode_block_valid(inode, nr, 1)) { EXT4_ERROR_INODE(inode, "invalid indirect mapped " "block %lu (level %d)", @@ -1182,21 +1180,21 @@ void ext4_ind_truncate(handle_t *handle, struct inode *inode) ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); i_data[EXT4_IND_BLOCK] = 0; } - /* fall through */ + fallthrough; case EXT4_IND_BLOCK: nr = i_data[EXT4_DIND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); i_data[EXT4_DIND_BLOCK] = 0; } - /* fall through */ + fallthrough; case EXT4_DIND_BLOCK: nr = i_data[EXT4_TIND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); i_data[EXT4_TIND_BLOCK] = 0; } - /* fall through */ + fallthrough; case EXT4_TIND_BLOCK: ; } @@ -1436,7 +1434,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode, ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); i_data[EXT4_IND_BLOCK] = 0; } - /* fall through */ + fallthrough; case EXT4_IND_BLOCK: if (++n >= n2) break; @@ -1445,7 +1443,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode, ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); i_data[EXT4_DIND_BLOCK] = 0; } - /* fall through */ + fallthrough; case EXT4_DIND_BLOCK: if (++n >= n2) break; @@ -1454,7 +1452,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode, ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); i_data[EXT4_TIND_BLOCK] = 0; } - /* fall through */ + fallthrough; case EXT4_TIND_BLOCK: ; } diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index c3a1ad2db1227bbcec24468ec50a83e4cad8e4c3..75c97bca081561b5b6069bc55c1e2e24910421d7 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -276,7 +276,7 @@ static int ext4_create_inline_data(handle_t *handle, len = 0; } - /* Insert the the xttr entry. */ + /* Insert the xttr entry. 
*/ i.value = value; i.value_len = len; @@ -1706,7 +1706,7 @@ int ext4_delete_inline_entry(handle_t *handle, if (err) goto out; - err = ext4_generic_delete_entry(handle, dir, de_del, bh, + err = ext4_generic_delete_entry(dir, de_del, bh, inline_start, inline_size, 0); if (err) goto out; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 44bad4bb8831d028e5828747c6fffcd98ca66c18..bf596467c234ce052c4b784cea15c30cd7da118d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -383,7 +383,7 @@ void ext4_da_update_reserve_space(struct inode *inode, */ if ((ei->i_reserved_data_blocks == 0) && !inode_is_open_for_write(inode)) - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); } static int __check_block_validity(struct inode *inode, const char *func, @@ -394,8 +394,7 @@ static int __check_block_validity(struct inode *inode, const char *func, (inode->i_ino == le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) return 0; - if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, - map->m_len)) { + if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) { ext4_error_inode(inode, func, line, map->m_pblk, "lblock %lu mapped to illegal pblock %llu " "(length %d)", (unsigned long) map->m_lblk, @@ -3288,7 +3287,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait) if (PageChecked(page)) return 0; if (journal) - return jbd2_journal_try_to_free_buffers(journal, page, wait); + return jbd2_journal_try_to_free_buffers(journal, page); else return try_to_free_buffers(page); } @@ -4056,7 +4055,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) if (stop_block > first_block) { down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ret = ext4_es_remove_extent(inode, first_block, stop_block - first_block); @@ -4163,7 +4162,7 @@ int ext4_truncate(struct inode *inode) trace_ext4_truncate_enter(inode); if (!ext4_can_truncate(inode)) - return 0; + goto out_trace; if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); @@ -4172,16 +4171,14 @@ int ext4_truncate(struct inode *inode) int has_inline = 1; err = ext4_inline_data_truncate(inode, &has_inline); - if (err) - return err; - if (has_inline) - return 0; + if (err || has_inline) + goto out_trace; } /* If we zero-out tail of the page, we have to create jinode for jbd2 */ if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { if (ext4_inode_attach_jinode(inode) < 0) - return 0; + goto out_trace; } if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) @@ -4190,8 +4187,10 @@ int ext4_truncate(struct inode *inode) credits = ext4_blocks_for_truncate(inode); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); - if (IS_ERR(handle)) - return PTR_ERR(handle); + if (IS_ERR(handle)) { + err = PTR_ERR(handle); + goto out_trace; + } if (inode->i_size & (inode->i_sb->s_blocksize - 1)) ext4_block_truncate_page(handle, mapping, inode->i_size); @@ -4211,7 +4210,7 @@ int ext4_truncate(struct inode *inode) down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) err = ext4_ext_truncate(handle, inode); @@ -4242,6 +4241,7 @@ int ext4_truncate(struct inode *inode) err = err2; ext4_journal_stop(handle); +out_trace: trace_ext4_truncate_exit(inode); return err; } @@ -4760,7 +4760,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, ret = 0; if 
(ei->i_file_acl && - !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { + !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) { ext4_error_inode(inode, function, line, 0, "iget: bad extended attribute block %llu", ei->i_file_acl); @@ -4901,7 +4901,7 @@ static void __ext4_update_other_inode_time(struct super_block *sb, (inode->i_state & I_DIRTY_TIME)) { struct ext4_inode_info *ei = EXT4_I(inode); - inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); + inode->i_state &= ~I_DIRTY_TIME; spin_unlock(&inode->i_lock); spin_lock(&ei->i_raw_lock); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 999cf6add39c62de85c8accfd60ef515d0fe92bf..36eca3bc036af3a94348f562d3cd671f85a32e63 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -202,7 +202,7 @@ static long swap_inode_boot_loader(struct super_block *sb, reset_inode_seed(inode); reset_inode_seed(inode_bl); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); err = ext4_mark_inode_dirty(handle, inode); if (err < 0) { @@ -819,12 +819,12 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { case FS_IOC_GETFSMAP: return ext4_ioc_getfsmap(sb, (void __user *)arg); - case EXT4_IOC_GETFLAGS: + case FS_IOC_GETFLAGS: flags = ei->i_flags & EXT4_FL_USER_VISIBLE; if (S_ISREG(inode->i_mode)) flags &= ~EXT4_PROJINHERIT_FL; return put_user(flags, (int __user *) arg); - case EXT4_IOC_SETFLAGS: { + case FS_IOC_SETFLAGS: { int err; if (!inode_owner_or_capable(inode)) @@ -1129,12 +1129,12 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case EXT4_IOC_PRECACHE_EXTENTS: return ext4_ext_precache(inode); - case EXT4_IOC_SET_ENCRYPTION_POLICY: + case FS_IOC_SET_ENCRYPTION_POLICY: if (!ext4_has_feature_encrypt(sb)) return -EOPNOTSUPP; return fscrypt_ioctl_set_policy(filp, (const void __user *)arg); - case EXT4_IOC_GET_ENCRYPTION_PWSALT: { + case FS_IOC_GET_ENCRYPTION_PWSALT: { #ifdef CONFIG_FS_ENCRYPTION int err, err2; struct ext4_sb_info *sbi = EXT4_SB(sb); @@ -1174,7 +1174,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EOPNOTSUPP; #endif } - case EXT4_IOC_GET_ENCRYPTION_POLICY: + case FS_IOC_GET_ENCRYPTION_POLICY: if (!ext4_has_feature_encrypt(sb)) return -EOPNOTSUPP; return fscrypt_ioctl_get_policy(filp, (void __user *)arg); @@ -1236,7 +1236,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case EXT4_IOC_GET_ES_CACHE: return ext4_ioctl_get_es_cache(filp, arg); - case EXT4_IOC_FSGETXATTR: + case FS_IOC_FSGETXATTR: { struct fsxattr fa; @@ -1247,7 +1247,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EFAULT; return 0; } - case EXT4_IOC_FSSETXATTR: + case FS_IOC_FSSETXATTR: { struct fsxattr fa, old_fa; int err; @@ -1313,11 +1313,11 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { /* These are just misnamed, they actually get/put from/to user an int */ switch (cmd) { - case EXT4_IOC32_GETFLAGS: - cmd = EXT4_IOC_GETFLAGS; + case FS_IOC32_GETFLAGS: + cmd = FS_IOC_GETFLAGS; break; - case EXT4_IOC32_SETFLAGS: - cmd = EXT4_IOC_SETFLAGS; + case FS_IOC32_SETFLAGS: + cmd = FS_IOC_SETFLAGS; break; case EXT4_IOC32_GETVERSION: cmd = EXT4_IOC_GETVERSION; @@ -1361,9 +1361,9 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case EXT4_IOC_RESIZE_FS: case FITRIM: case EXT4_IOC_PRECACHE_EXTENTS: - case EXT4_IOC_SET_ENCRYPTION_POLICY: - case EXT4_IOC_GET_ENCRYPTION_PWSALT: - case EXT4_IOC_GET_ENCRYPTION_POLICY: + case 
FS_IOC_SET_ENCRYPTION_POLICY: + case FS_IOC_GET_ENCRYPTION_PWSALT: + case FS_IOC_GET_ENCRYPTION_POLICY: case FS_IOC_GET_ENCRYPTION_POLICY_EX: case FS_IOC_ADD_ENCRYPTION_KEY: case FS_IOC_REMOVE_ENCRYPTION_KEY: @@ -1377,8 +1377,8 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case EXT4_IOC_CLEAR_ES_CACHE: case EXT4_IOC_GETSTATE: case EXT4_IOC_GET_ES_CACHE: - case EXT4_IOC_FSGETXATTR: - case EXT4_IOC_FSSETXATTR: + case FS_IOC_FSGETXATTR: + case FS_IOC_FSSETXATTR: break; default: return -ENOIOCTLCMD; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index c0a331e2feb02454a10f111b4d7f7f6c1cc29f09..132c118d12e153cfb72ac12a3402174922ec36c8 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -922,7 +922,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) bh[i] = NULL; continue; } - bh[i] = ext4_read_block_bitmap_nowait(sb, group); + bh[i] = ext4_read_block_bitmap_nowait(sb, group, false); if (IS_ERR(bh[i])) { err = PTR_ERR(bh[i]); bh[i] = NULL; @@ -1279,9 +1279,6 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, e4b->bd_buddy_page = page; e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); - BUG_ON(e4b->bd_bitmap_page == NULL); - BUG_ON(e4b->bd_buddy_page == NULL); - return 0; err: @@ -1743,10 +1740,6 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, } -/* - * regular allocator, for general purposes allocation - */ - static void ext4_mb_check_limits(struct ext4_allocation_context *ac, struct ext4_buddy *e4b, int finish_group) @@ -2119,13 +2112,11 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac, BUG_ON(cr < 0 || cr >= 4); - free = grp->bb_free; - if (free == 0) - return false; - if (cr <= 2 && free < ac->ac_g_ex.fe_len) + if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) return false; - if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) + free = grp->bb_free; + if (free == 0) return false; fragments = grp->bb_fragments; @@ -2142,8 +2133,10 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac, ((group % flex_size) == 0)) return false; - if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) || - (free / fragments) >= ac->ac_g_ex.fe_len) + if (free < ac->ac_g_ex.fe_len) + return false; + + if (ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) return true; if (grp->bb_largest_free_order < ac->ac_2order) @@ -2177,6 +2170,7 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, { struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); struct super_block *sb = ac->ac_sb; + struct ext4_sb_info *sbi = EXT4_SB(sb); bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; ext4_grpblk_t free; int ret = 0; @@ -2195,7 +2189,25 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, /* We only do this if the grp has never been initialized */ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { - ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS); + struct ext4_group_desc *gdp = + ext4_get_group_desc(sb, group, NULL); + int ret; + + /* cr=0/1 is a very optimistic search to find large + * good chunks almost for free. If buddy data is not + * ready, then this optimization makes no sense. But + * we never skip the first block group in a flex_bg, + * since this gets used for metadata block allocation, + * and we want to make sure we locate metadata blocks + * in the first block group in the flex_bg if possible. 
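/*
 * Not from the patch: a worked example of the "first group in a
 * flex_bg" mask test that follows this comment. With
 * s_log_groups_per_flex == 4 (16 groups per flex group, a value
 * assumed here for illustration), groups 0, 16, 32, ... are the first
 * in their flex_bg and are never skipped.
 */
#include <assert.h>

static int first_in_flex_bg(unsigned int group, int log_groups_per_flex)
{
	return (group & ((1U << log_groups_per_flex) - 1)) == 0;
}

int main(void)
{
	assert(first_in_flex_bg(0, 4));
	assert(first_in_flex_bg(16, 4));
	assert(!first_in_flex_bg(17, 4));
	return 0;
}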
+		 */
+		if (cr < 2 &&
+		    (!sbi->s_log_groups_per_flex ||
+		     ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
+		    !(ext4_has_group_desc_csum(sb) &&
+		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
+			return 0;
+		ret = ext4_mb_init_group(sb, group, GFP_NOFS);
 		if (ret)
 			return ret;
 	}
@@ -2209,15 +2221,95 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
 	return ret;
 }
 
+/*
+ * Start prefetching @nr block bitmaps starting at @group.
+ * Return the next group which needs to be prefetched.
+ */
+ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
+			      unsigned int nr, int *cnt)
+{
+	ext4_group_t ngroups = ext4_get_groups_count(sb);
+	struct buffer_head *bh;
+	struct blk_plug plug;
+
+	blk_start_plug(&plug);
+	while (nr-- > 0) {
+		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
+								  NULL);
+		struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+
+		/*
+		 * Prefetch block groups with free blocks; but don't
+		 * bother if it is marked uninitialized on disk, since
+		 * it won't require I/O to read.  Also only try to
+		 * prefetch once, so we avoid the getblk() call, which
+		 * can be expensive.
+		 */
+		if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
+		    EXT4_MB_GRP_NEED_INIT(grp) &&
+		    ext4_free_group_clusters(sb, gdp) > 0 &&
+		    !(ext4_has_group_desc_csum(sb) &&
+		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
+			bh = ext4_read_block_bitmap_nowait(sb, group, true);
+			if (bh && !IS_ERR(bh)) {
+				if (!buffer_uptodate(bh) && cnt)
+					(*cnt)++;
+				brelse(bh);
+			}
+		}
+		if (++group >= ngroups)
+			group = 0;
+	}
+	blk_finish_plug(&plug);
+	return group;
+}
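A stand-alone sketch of the cursor contract used by ext4_mb_prefetch() above: walk a fixed number of groups, wrap at the end of the filesystem, and return where the next window should start. NGROUPS and process_group() are illustrative stand-ins, not ext4 API.

#include <stdio.h>

#define NGROUPS 10u	/* stand-in for ext4_get_groups_count(sb) */

static void process_group(unsigned int group)
{
	/* stand-in for the non-blocking bitmap read kicked off per group */
	printf("prefetch group %u\n", group);
}

static unsigned int prefetch_window(unsigned int group, unsigned int nr)
{
	while (nr-- > 0) {
		process_group(group);
		if (++group >= NGROUPS)
			group = 0;	/* wrap, as ext4_mb_prefetch() does */
	}
	return group;			/* next group to prefetch */
}

int main(void)
{
	unsigned int cursor = 8;

	cursor = prefetch_window(cursor, 4);	/* visits 8, 9, 0, 1 */
	printf("next cursor: %u\n", cursor);	/* prints 2 */
	return 0;
}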
+
+/*
+ * Prefetching reads the block bitmap into the buffer cache; but we
+ * need to make sure that the buddy bitmap in the page cache has been
+ * initialized.  Note that ext4_mb_init_group() will block if the I/O
+ * is not yet completed, or indeed if the I/O was not initiated by
+ * ext4_mb_prefetch().
+ *
+ * TODO: We should actually kick off the buddy bitmap setup in a work
+ * queue when the buffer I/O is completed, so that we don't block
+ * waiting for the block allocation bitmap read to finish when
+ * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
+ */
+void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
+			   unsigned int nr)
+{
+	while (nr-- > 0) {
+		struct ext4_group_desc *gdp;
+		struct ext4_group_info *grp;
+
+		if (!group)
+			group = ext4_get_groups_count(sb);
+		group--;
+		gdp = ext4_get_group_desc(sb, group, NULL);
+		grp = ext4_get_group_info(sb, group);
+
+		if (EXT4_MB_GRP_NEED_INIT(grp) &&
+		    ext4_free_group_clusters(sb, gdp) > 0 &&
+		    !(ext4_has_group_desc_csum(sb) &&
+		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
+			if (ext4_mb_init_group(sb, group, GFP_NOFS))
+				break;
+		}
+	}
+}
+
 static noinline_for_stack int
 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 {
-	ext4_group_t ngroups, group, i;
+	ext4_group_t prefetch_grp = 0, ngroups, group, i;
 	int cr = -1;
 	int err = 0, first_err = 0;
+	unsigned int nr = 0, prefetch_ios = 0;
 	struct ext4_sb_info *sbi;
 	struct super_block *sb;
 	struct ext4_buddy e4b;
+	int lost;
 
 	sb = ac->ac_sb;
 	sbi = EXT4_SB(sb);
@@ -2237,8 +2329,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 		goto out;
 
 	/*
-	 * ac->ac2_order is set only if the fe_len is a power of 2
-	 * if ac2_order is set we also set criteria to 0 so that we
+	 * ac->ac_2order is set only if the fe_len is a power of 2
+	 * if ac->ac_2order is set we also set criteria to 0 so that we
 	 * try exact allocation using buddy.
 	 */
 	i = fls(ac->ac_g_ex.fe_len);
@@ -2282,6 +2374,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 	 * from the goal value specified
 	 */
 	group = ac->ac_g_ex.fe_group;
+	prefetch_grp = group;
 
 	for (i = 0; i < ngroups; group++, i++) {
 		int ret = 0;
@@ -2293,6 +2386,29 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 		if (group >= ngroups)
 			group = 0;
 
+		/*
+		 * Batch reads of the block allocation bitmaps
+		 * to get multiple READs in flight; limit
+		 * prefetching at cr=0/1, otherwise mballoc can
+		 * spend a lot of time loading imperfect groups
+		 */
+		if ((prefetch_grp == group) &&
+		    (cr > 1 ||
+		     prefetch_ios < sbi->s_mb_prefetch_limit)) {
+			unsigned int curr_ios = prefetch_ios;
+
+			nr = sbi->s_mb_prefetch;
+			if (ext4_has_feature_flex_bg(sb)) {
+				nr = (group / sbi->s_mb_prefetch) *
+					sbi->s_mb_prefetch;
+				nr = nr + sbi->s_mb_prefetch - group;
+			}
+			prefetch_grp = ext4_mb_prefetch(sb, group,
+							nr, &prefetch_ios);
+			if (prefetch_ios == curr_ios)
+				nr = 0;
+		}
+
 		/* This now checks without needing the buddy page */
 		ret = ext4_mb_good_group_nolock(ac, group, cr);
 		if (ret <= 0) {
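The flex_bg branch in the batching hunk above does not always issue a full s_mb_prefetch-sized window: it trims the window so it ends on the next s_mb_prefetch boundary. A self-contained check of that arithmetic; aligned_window() is an illustrative helper, not kernel code.

#include <assert.h>
#include <stdio.h>

/*
 * Reproduce the flex_bg alignment above: the window runs from 'group'
 * up to the next multiple of 'prefetch', rather than always spanning
 * a full 'prefetch' groups from an unaligned starting point.
 */
static unsigned int aligned_window(unsigned int group, unsigned int prefetch)
{
	unsigned int nr = (group / prefetch) * prefetch;

	return nr + prefetch - group;
}

int main(void)
{
	/* cursor mid-window: only the 6 groups up to the boundary remain */
	assert(aligned_window(10, 16) == 6);
	/* cursor on a boundary: a full 16-group window is prefetched */
	assert(aligned_window(16, 16) == 16);
	printf("window alignment checks passed\n");
	return 0;
}

The effect is that windows tile the group space on fixed boundaries instead of drifting with whatever goal group the allocation happened to start from.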
@@ -2341,22 +2457,24 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 		 * We've been searching too long. Let's try to allocate
 		 * the best chunk we've found so far
 		 */
-
 		ext4_mb_try_best_found(ac, &e4b);
 		if (ac->ac_status != AC_STATUS_FOUND) {
 			/*
 			 * Someone more lucky has already allocated it.
 			 * The only thing we can do is just take first
 			 * found block(s)
-			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
 			 */
+			lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
+			mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
+				 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
+				 ac->ac_b_ex.fe_len, lost);
+
 			ac->ac_b_ex.fe_group = 0;
 			ac->ac_b_ex.fe_start = 0;
 			ac->ac_b_ex.fe_len = 0;
 			ac->ac_status = AC_STATUS_CONTINUE;
 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
 			cr = 3;
-			atomic_inc(&sbi->s_mb_lost_chunks);
 			goto repeat;
 		}
 	}
@@ -2367,6 +2485,10 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 	mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
 		 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
 		 ac->ac_flags, cr, err);
+
+	if (nr)
+		ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
+
 	return err;
 }
 
@@ -2439,7 +2561,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
 	for (i = 0; i <= 13; i++)
 		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
 				sg.info.bb_counters[i] : 0);
-	seq_printf(seq, " ]\n");
+	seq_puts(seq, " ]\n");
 
 	return 0;
 }
@@ -2613,6 +2735,26 @@ static int ext4_mb_init_backend(struct super_block *sb)
 		goto err_freebuddy;
 	}
 
+	if (ext4_has_feature_flex_bg(sb)) {
+		/* a single flex group is supposed to be read by a single IO */
+		sbi->s_mb_prefetch = 1 << sbi->s_es->s_log_groups_per_flex;
+		sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
+	} else {
+		sbi->s_mb_prefetch = 32;
+	}
+	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
+		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
+	/* How many real I/Os to prefetch within a single allocation at
+	 * cr=0.  Given that cr=0 is a CPU-related optimization we shouldn't
+	 * try to load too many groups; at some point we should start to
+	 * use what we've got in memory.
+	 * With an average random access time of 5ms, it'd take a second
+	 * to get 200 groups (* N with flex_bg), so let's make this limit 4
+	 */
+	sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
+	if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
+		sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
+
 	return 0;
 
 err_freebuddy:
@@ -2736,6 +2878,7 @@ int ext4_mb_init(struct super_block *sb)
 	sbi->s_mb_stats = MB_DEFAULT_STATS;
 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
+	sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
 	/*
 	 * The default group preallocation is 512, which for 4k block
 	 * sizes translates to 2 megabytes.
However for bigalloc file @@ -3090,7 +3233,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); - if (!ext4_data_block_valid(sbi, block, len)) { + if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { ext4_error(sb, "Allocating blocks %llu-%llu which overlap " "fs metadata", block, block+len); /* File system mounted not to panic on error @@ -3674,6 +3817,26 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); } +static void ext4_mb_mark_pa_deleted(struct super_block *sb, + struct ext4_prealloc_space *pa) +{ + struct ext4_inode_info *ei; + + if (pa->pa_deleted) { + ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", + pa->pa_type, pa->pa_pstart, pa->pa_lstart, + pa->pa_len); + return; + } + + pa->pa_deleted = 1; + + if (pa->pa_type == MB_INODE_PA) { + ei = EXT4_I(pa->pa_inode); + atomic_dec(&ei->i_prealloc_active); + } +} + static void ext4_mb_pa_callback(struct rcu_head *head) { struct ext4_prealloc_space *pa; @@ -3706,7 +3869,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, return; } - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); grp_blk = pa->pa_pstart; @@ -3830,6 +3993,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) spin_lock(pa->pa_obj_lock); list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); spin_unlock(pa->pa_obj_lock); + atomic_inc(&ei->i_prealloc_active); } /* @@ -4040,7 +4204,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, } /* seems this one can be freed ... */ - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); /* we can trust pa_free ... */ free += pa->pa_free; @@ -4103,7 +4267,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, * * FIXME!! Make sure it is valid at all the call sites */ -void ext4_discard_preallocations(struct inode *inode) +void ext4_discard_preallocations(struct inode *inode, unsigned int needed) { struct ext4_inode_info *ei = EXT4_I(inode); struct super_block *sb = inode->i_sb; @@ -4121,15 +4285,19 @@ void ext4_discard_preallocations(struct inode *inode) mb_debug(sb, "discard preallocation for inode %lu\n", inode->i_ino); - trace_ext4_discard_preallocations(inode); + trace_ext4_discard_preallocations(inode, + atomic_read(&ei->i_prealloc_active), needed); INIT_LIST_HEAD(&list); + if (needed == 0) + needed = UINT_MAX; + repeat: /* first, collect all pa's in the inode */ spin_lock(&ei->i_prealloc_lock); - while (!list_empty(&ei->i_prealloc_list)) { - pa = list_entry(ei->i_prealloc_list.next, + while (!list_empty(&ei->i_prealloc_list) && needed) { + pa = list_entry(ei->i_prealloc_list.prev, struct ext4_prealloc_space, pa_inode_list); BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); spin_lock(&pa->pa_lock); @@ -4146,10 +4314,11 @@ void ext4_discard_preallocations(struct inode *inode) } if (pa->pa_deleted == 0) { - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); list_del_rcu(&pa->pa_inode_list); list_add(&pa->u.pa_tmp_list, &list); + needed--; continue; } @@ -4399,7 +4568,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, ac->ac_g_ex = ac->ac_o_ex; ac->ac_flags = ar->flags; - /* we have to define context: we'll we work with a file or + /* we have to define context: we'll work with a file or * locality group. 
this is a policy, actually */
 	ext4_mb_group_or_file(ac);
 
@@ -4450,7 +4619,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
 		BUG_ON(pa->pa_type != MB_GROUP_PA);
 
 		/* seems this one can be freed ... */
-		pa->pa_deleted = 1;
+		ext4_mb_mark_pa_deleted(sb, pa);
 		spin_unlock(&pa->pa_lock);
 
 		list_del_rcu(&pa->pa_inode_list);
@@ -4548,11 +4717,30 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
 	return ;
 }
 
+/*
+ * if the per-inode prealloc list is too long, trim some PAs
+ */
+static void ext4_mb_trim_inode_pa(struct inode *inode)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	int count, delta;
+
+	count = atomic_read(&ei->i_prealloc_active);
+	delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
+	if (count > sbi->s_mb_max_inode_prealloc + delta) {
+		count -= sbi->s_mb_max_inode_prealloc;
+		ext4_discard_preallocations(inode, count);
+	}
+}
+
 /*
  * release all resources used in the allocation
  */
 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
 {
+	struct inode *inode = ac->ac_inode;
+	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 	struct ext4_prealloc_space *pa = ac->ac_pa;
 	if (pa) {
@@ -4564,21 +4752,31 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
 			pa->pa_free -= ac->ac_b_ex.fe_len;
 			pa->pa_len -= ac->ac_b_ex.fe_len;
 			spin_unlock(&pa->pa_lock);
+
+			/*
+			 * We want to add the pa to the right bucket.
+			 * Remove it from the list and while adding
+			 * make sure the list to which we are adding
+			 * doesn't grow big.
+			 */
+			if (likely(pa->pa_free)) {
+				spin_lock(pa->pa_obj_lock);
+				list_del_rcu(&pa->pa_inode_list);
+				spin_unlock(pa->pa_obj_lock);
+				ext4_mb_add_n_trim(ac);
+			}
 		}
-	}
-	if (pa) {
-		/*
-		 * We want to add the pa to the right bucket.
-		 * Remove it from the list and while adding
-		 * make sure the list to which we are adding
-		 * doesn't grow big.
-		 */
-		if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
+
+		if (pa->pa_type == MB_INODE_PA) {
+			/*
+			 * treat the per-inode prealloc list as an LRU list,
+			 * then try to trim the least recently used PA.
+			 */
 			spin_lock(pa->pa_obj_lock);
-			list_del_rcu(&pa->pa_inode_list);
+			list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
 			spin_unlock(pa->pa_obj_lock);
-			ext4_mb_add_n_trim(ac);
 		}
+
+		ext4_mb_put_pa(ac, ac->ac_sb, pa);
 	}
 
 	if (ac->ac_bitmap_page)
@@ -4588,6 +4786,7 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
 		mutex_unlock(&ac->ac_lg->lg_mutex);
 	ext4_mb_collect_stats(ac);
+	ext4_mb_trim_inode_pa(inode);
 	return 0;
 }
 
@@ -4915,7 +5114,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	sbi = EXT4_SB(sb);
 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
-	    !ext4_data_block_valid(sbi, block, count)) {
+	    !ext4_inode_block_valid(inode, block, count)) {
 		ext4_error(sb, "Freeing blocks not in datazone - "
 			   "block = %llu, count = %lu", block, count);
 		goto error_return;
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 6b4d17c2935d6eca8e0cae5d69b8d283ce754175..e75b4749aa1c27cfb1f712bfdbf561801b312d20 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -73,6 +73,10 @@
  */
 #define MB_DEFAULT_GROUP_PREALLOC	512
 
+/*
+ * maximum length of the inode prealloc list
+ */
+#define MB_DEFAULT_MAX_INODE_PREALLOC	512
 
 struct ext4_free_data {
 	/* this links the free block information from sb_info */
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 1ed86fb6c3026ebb2025a4434c01a47720875375..0d601b8228753e2b975d24c9098c5c77a8dbec04 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -686,8 +686,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
 
 out:
 	if (*moved_len) {
-		ext4_discard_preallocations(orig_inode);
-		ext4_discard_preallocations(donor_inode);
+		ext4_discard_preallocations(orig_inode, 0);
+		ext4_discard_preallocations(donor_inode, 0);
 	}
 
 	ext4_ext_drop_refs(path);
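The per-inode PA trimming added above is deliberately lazy: ext4_mb_trim_inode_pa() lets the active count overshoot s_mb_max_inode_prealloc by a quarter plus one before it discards anything, and then discards only the excess over the limit itself, so a list hovering near the limit is not trimmed on every allocation. A small stand-alone model of that policy; pa_trim_count() is an illustrative helper, not kernel code.

#include <assert.h>
#include <stdio.h>

/*
 * Model of the hysteresis in ext4_mb_trim_inode_pa(): returns how many
 * PAs would be discarded for a given active count and limit.
 */
static int pa_trim_count(int count, int max_inode_prealloc)
{
	int delta = (max_inode_prealloc >> 2) + 1;

	if (count > max_inode_prealloc + delta)
		return count - max_inode_prealloc;
	return 0;
}

int main(void)
{
	/* with the default limit of 512, the slack (delta) is 129 */
	assert(pa_trim_count(641, 512) == 0);	/* still within the slack */
	assert(pa_trim_count(642, 512) == 130);	/* trimmed back to 512 */
	printf("trim hysteresis checks passed\n");
	return 0;
}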
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 56738b538ddf4c066df33a59a06020352313c749..153a9fbe1dd06948dd76fa1041c086342d802a19 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1396,8 +1396,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
 		    ext4_match(dir, fname, de)) {
 			/* found a match - just to be sure, do
 			 * a full check */
-			if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
-						 bh->b_size, offset))
+			if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
+						 buf_size, offset))
 				return -1;
 			*res_dir = de;
 			return 1;
@@ -1858,7 +1858,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
 			blocksize, hinfo, map);
 	map -= count;
 	dx_sort_map(map, count);
-	/* Split the existing block in the middle, size-wise */
+	/* Ensure that neither split block is over half full */
 	size = 0;
 	move = 0;
 	for (i = count-1; i >= 0; i--) {
@@ -1868,8 +1868,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
 		size += map[i].size;
 		move++;
 	}
-	/* map index at which we will split */
-	split = count - move;
+	/*
+	 * map index at which we will split
+	 *
+	 * If the sum of active entries doesn't exceed half the block size,
+	 * just split it in half by count; each resulting block will have at
+	 * least half the space free.
+	 */
+	if (i > 0)
+		split = count - move;
+	else
+		split = count/2;
+
 	hash2 = map[split].hash;
 	continued = hash2 == map[split - 1].hash;
 	dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
@@ -2455,8 +2465,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
  * ext4_generic_delete_entry deletes a directory entry by merging it
  * with the previous entry
  */
-int ext4_generic_delete_entry(handle_t *handle,
-			      struct inode *dir,
+int ext4_generic_delete_entry(struct inode *dir,
 			      struct ext4_dir_entry_2 *de_del,
 			      struct buffer_head *bh,
 			      void *entry_buf,
@@ -2472,7 +2481,7 @@ int ext4_generic_delete_entry(handle_t *handle,
 	de = (struct ext4_dir_entry_2 *)entry_buf;
 	while (i < buf_size - csum_size) {
 		if (ext4_check_dir_entry(dir, NULL, de, bh,
-					 bh->b_data, bh->b_size, i))
+					 entry_buf, buf_size, i))
 			return -EFSCORRUPTED;
 		if (de == de_del) {
 			if (pde)
@@ -2517,8 +2526,7 @@ static int ext4_delete_entry(handle_t *handle,
 	if (unlikely(err))
 		goto out;
 
-	err = ext4_generic_delete_entry(handle, dir, de_del,
-					bh, bh->b_data,
+	err = ext4_generic_delete_entry(dir, de_del, bh, bh->b_data,
 					dir->i_sb->s_blocksize, csum_size);
 	if (err)
 		goto out;
@@ -3193,30 +3201,33 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
 	 * in separate transaction */
 	retval = dquot_initialize(dir);
 	if (retval)
-		return retval;
+		goto out_trace;
 	retval = dquot_initialize(d_inode(dentry));
 	if (retval)
-		return retval;
+		goto out_trace;
 
-	retval = -ENOENT;
 	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
-	if (IS_ERR(bh))
-		return PTR_ERR(bh);
-	if (!bh)
-		goto end_unlink;
+	if (IS_ERR(bh)) {
+		retval = PTR_ERR(bh);
+		goto out_trace;
+	}
+	if (!bh) {
+		retval = -ENOENT;
+		goto out_trace;
+	}
 
 	inode = d_inode(dentry);
 
-	retval = -EFSCORRUPTED;
-	if (le32_to_cpu(de->inode) != inode->i_ino)
-		goto end_unlink;
+	if (le32_to_cpu(de->inode) != inode->i_ino) {
+		retval = -EFSCORRUPTED;
+		goto out_bh;
+	}
 
 	handle = ext4_journal_start(dir, EXT4_HT_DIR,
 				    EXT4_DATA_TRANS_BLOCKS(dir->i_sb));
 	if (IS_ERR(handle)) {
 		retval = PTR_ERR(handle);
-		handle = NULL;
-		goto end_unlink;
+		goto out_bh;
 	}
 
 	if (IS_DIRSYNC(dir))
@@ -3224,12 +3235,12 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
 
 	retval = ext4_delete_entry(handle, dir, de, bh);
 	if (retval)
-		goto end_unlink;
+		goto out_handle;
 	dir->i_ctime = dir->i_mtime = current_time(dir);
 	ext4_update_dx_flag(dir);
 	retval = ext4_mark_inode_dirty(handle, dir);
 	if (retval)
-		goto end_unlink;
+		goto out_handle;
 	if (inode->i_nlink == 0)
 		ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
 				   dentry->d_name.len, dentry->d_name.name);
@@ -3251,10 +3262,11 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
 	d_invalidate(dentry);
 #endif
 
-end_unlink:
+out_handle:
+	ext4_journal_stop(handle);
+out_bh:
 	brelse(bh);
-	if (handle)
-		ext4_journal_stop(handle);
+out_trace:
 	trace_ext4_unlink_exit(dentry, retval);
 	return retval;
 }
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index f2df2db0786cfd956a3ae02cc4d9588bf5793aa3..f014c5e473a9d228d7c3e66ef0214768725a0540 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -140,7 +140,7 @@ static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
 			return;
 		}
 		ctx->cur_step++;
-		/* fall-through */
+		fallthrough;
 	case STEP_VERITY:
 		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
 			INIT_WORK(&ctx->work, verity_work);
@@ -148,7 +148,7 @@ static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
 			return;
 		}
 		ctx->cur_step++;
-		/* fall-through */
+		fallthrough;
 	default:
__read_end_io(ctx->bio); } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 0907f907c47d1f622e1c15bfacae9c10273ed179..ea425b49b34567c9b20841297a6fb011c627cb6b 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -66,10 +66,10 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *, unsigned long journal_devnum); static int ext4_show_options(struct seq_file *seq, struct dentry *root); static int ext4_commit_super(struct super_block *sb, int sync); -static void ext4_mark_recovery_complete(struct super_block *sb, +static int ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es); -static void ext4_clear_journal_err(struct super_block *sb, - struct ext4_super_block *es); +static int ext4_clear_journal_err(struct super_block *sb, + struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); @@ -744,6 +744,7 @@ void __ext4_msg(struct super_block *sb, struct va_format vaf; va_list args; + atomic_inc(&EXT4_SB(sb)->s_msg_count); if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs")) return; @@ -754,9 +755,12 @@ void __ext4_msg(struct super_block *sb, va_end(args); } -#define ext4_warning_ratelimit(sb) \ - ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), \ - "EXT4-fs warning") +static int ext4_warning_ratelimit(struct super_block *sb) +{ + atomic_inc(&EXT4_SB(sb)->s_warning_count); + return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), + "EXT4-fs warning"); +} void __ext4_warning(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) @@ -1123,6 +1127,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) inode_set_iversion(&ei->vfs_inode, 1); spin_lock_init(&ei->i_raw_lock); INIT_LIST_HEAD(&ei->i_prealloc_list); + atomic_set(&ei->i_prealloc_active, 0); spin_lock_init(&ei->i_prealloc_lock); ext4_es_init_tree(&ei->i_es_tree); rwlock_init(&ei->i_es_lock); @@ -1216,7 +1221,7 @@ void ext4_clear_inode(struct inode *inode) { invalidate_inode_buffers(inode); clear_inode(inode); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); dquot_drop(inode); if (EXT4_I(inode)->jinode) { @@ -1288,8 +1293,8 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, if (!page_has_buffers(page)) return 0; if (journal) - return jbd2_journal_try_to_free_buffers(journal, page, - wait & ~__GFP_DIRECT_RECLAIM); + return jbd2_journal_try_to_free_buffers(journal, page); + return try_to_free_buffers(page); } @@ -1522,6 +1527,7 @@ enum { Opt_dioread_nolock, Opt_dioread_lock, Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache, + Opt_prefetch_block_bitmaps, }; static const match_table_t tokens = { @@ -1614,6 +1620,7 @@ static const match_table_t tokens = { {Opt_inlinecrypt, "inlinecrypt"}, {Opt_nombcache, "nombcache"}, {Opt_nombcache, "no_mbcache"}, /* for backward compatibility */ + {Opt_prefetch_block_bitmaps, "prefetch_block_bitmaps"}, {Opt_removed, "check=none"}, /* mount option from ext2/3 */ {Opt_removed, "nocheck"}, /* mount option from ext2/3 */ {Opt_removed, "reservation"}, /* mount option from ext2/3 */ @@ -1831,6 +1838,8 @@ static const struct mount_opts { {Opt_max_dir_size_kb, 0, MOPT_GTE0}, {Opt_test_dummy_encryption, 0, MOPT_STRING}, {Opt_nombcache, 
EXT4_MOUNT_NO_MBCACHE, MOPT_SET}, + {Opt_prefetch_block_bitmaps, EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS, + MOPT_SET}, {Opt_err, 0, 0} }; @@ -3213,15 +3222,34 @@ static void print_daily_error_info(struct timer_list *t) static int ext4_run_li_request(struct ext4_li_request *elr) { struct ext4_group_desc *gdp = NULL; - ext4_group_t group, ngroups; - struct super_block *sb; + struct super_block *sb = elr->lr_super; + ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; + ext4_group_t group = elr->lr_next_group; unsigned long timeout = 0; + unsigned int prefetch_ios = 0; int ret = 0; - sb = elr->lr_super; - ngroups = EXT4_SB(sb)->s_groups_count; + if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) { + elr->lr_next_group = ext4_mb_prefetch(sb, group, + EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios); + if (prefetch_ios) + ext4_mb_prefetch_fini(sb, elr->lr_next_group, + prefetch_ios); + trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, + prefetch_ios); + if (group >= elr->lr_next_group) { + ret = 1; + if (elr->lr_first_not_zeroed != ngroups && + !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) { + elr->lr_next_group = elr->lr_first_not_zeroed; + elr->lr_mode = EXT4_LI_MODE_ITABLE; + ret = 0; + } + } + return ret; + } - for (group = elr->lr_next_group; group < ngroups; group++) { + for (; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) { ret = 1; @@ -3239,9 +3267,10 @@ static int ext4_run_li_request(struct ext4_li_request *elr) timeout = jiffies; ret = ext4_init_inode_table(sb, group, elr->lr_timeout ? 0 : 1); + trace_ext4_lazy_itable_init(sb, group); if (elr->lr_timeout == 0) { timeout = (jiffies - timeout) * - elr->lr_sbi->s_li_wait_mult; + EXT4_SB(elr->lr_super)->s_li_wait_mult; elr->lr_timeout = timeout; } elr->lr_next_sched = jiffies + elr->lr_timeout; @@ -3256,15 +3285,11 @@ static int ext4_run_li_request(struct ext4_li_request *elr) */ static void ext4_remove_li_request(struct ext4_li_request *elr) { - struct ext4_sb_info *sbi; - if (!elr) return; - sbi = elr->lr_sbi; - list_del(&elr->lr_request); - sbi->s_li_request = NULL; + EXT4_SB(elr->lr_super)->s_li_request = NULL; kfree(elr); } @@ -3473,7 +3498,6 @@ static int ext4_li_info_new(void) static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, ext4_group_t start) { - struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_li_request *elr; elr = kzalloc(sizeof(*elr), GFP_KERNEL); @@ -3481,8 +3505,13 @@ static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, return NULL; elr->lr_super = sb; - elr->lr_sbi = sbi; - elr->lr_next_group = start; + elr->lr_first_not_zeroed = start; + if (test_opt(sb, PREFETCH_BLOCK_BITMAPS)) + elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP; + else { + elr->lr_mode = EXT4_LI_MODE_ITABLE; + elr->lr_next_group = start; + } /* * Randomize first schedule time of the request to @@ -3512,8 +3541,9 @@ int ext4_register_li_request(struct super_block *sb, goto out; } - if (first_not_zeroed == ngroups || sb_rdonly(sb) || - !test_opt(sb, INIT_INODE_TABLE)) + if (!test_opt(sb, PREFETCH_BLOCK_BITMAPS) && + (first_not_zeroed == ngroups || sb_rdonly(sb) || + !test_opt(sb, INIT_INODE_TABLE))) goto out; elr = ext4_li_request_new(sb, first_not_zeroed); @@ -4710,11 +4740,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ext4_set_resv_clusters(sb); - err = ext4_setup_system_zone(sb); - if (err) { - ext4_msg(sb, KERN_ERR, "failed to initialize system " - "zone (%d)", err); - goto failed_mount4a; + if (test_opt(sb, BLOCK_VALIDITY)) { + err = 
ext4_setup_system_zone(sb); + if (err) { + ext4_msg(sb, KERN_ERR, "failed to initialize system " + "zone (%d)", err); + goto failed_mount4a; + } } ext4_ext_init(sb); @@ -4777,12 +4809,23 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } #endif /* CONFIG_QUOTA */ + /* + * Save the original bdev mapping's wb_err value which could be + * used to detect the metadata async write error. + */ + spin_lock_init(&sbi->s_bdev_wb_lock); + if (!sb_rdonly(sb)) + errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, + &sbi->s_bdev_wb_err); + sb->s_bdev->bd_super = sb; EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; ext4_orphan_cleanup(sb, es); EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; if (needs_recovery) { ext4_msg(sb, KERN_INFO, "recovery complete"); - ext4_mark_recovery_complete(sb, es); + err = ext4_mark_recovery_complete(sb, es); + if (err) + goto failed_mount8; } if (EXT4_SB(sb)->s_journal) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) @@ -4816,6 +4859,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10); ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10); ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); + atomic_set(&sbi->s_warning_count, 0); + atomic_set(&sbi->s_msg_count, 0); kfree(orig_data); return 0; @@ -4825,10 +4870,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; -#ifdef CONFIG_QUOTA failed_mount8: ext4_unregister_sysfs(sb); -#endif failed_mount7: ext4_unregister_li_request(sb); failed_mount6: @@ -4968,7 +5011,8 @@ static journal_t *ext4_get_journal(struct super_block *sb, struct inode *journal_inode; journal_t *journal; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return NULL; journal_inode = ext4_get_journal_inode(sb, journal_inum); if (!journal_inode) @@ -4998,7 +5042,8 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, struct ext4_super_block *es; struct block_device *bdev; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return NULL; bdev = ext4_blkdev_get(j_dev, sb); if (bdev == NULL) @@ -5089,8 +5134,10 @@ static int ext4_load_journal(struct super_block *sb, dev_t journal_dev; int err = 0; int really_read_only; + int journal_dev_ro; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return -EFSCORRUPTED; if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { @@ -5100,7 +5147,31 @@ static int ext4_load_journal(struct super_block *sb, } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); - really_read_only = bdev_read_only(sb->s_bdev); + if (journal_inum && journal_dev) { + ext4_msg(sb, KERN_ERR, + "filesystem has both journal inode and journal device!"); + return -EINVAL; + } + + if (journal_inum) { + journal = ext4_get_journal(sb, journal_inum); + if (!journal) + return -EINVAL; + } else { + journal = ext4_get_dev_journal(sb, journal_dev); + if (!journal) + return -EINVAL; + } + + journal_dev_ro = bdev_read_only(journal->j_dev); + really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro; + + if (journal_dev_ro && !sb_rdonly(sb)) { + ext4_msg(sb, KERN_ERR, + "journal device read-only, try mounting with '-o ro'"); + err = -EROFS; + goto err_out; + } /* * Are we loading a blank journal or performing recovery after a 
@@ -5115,27 +5186,14 @@ static int ext4_load_journal(struct super_block *sb, ext4_msg(sb, KERN_ERR, "write access " "unavailable, cannot proceed " "(try mounting with noload)"); - return -EROFS; + err = -EROFS; + goto err_out; } ext4_msg(sb, KERN_INFO, "write access will " "be enabled during recovery"); } } - if (journal_inum && journal_dev) { - ext4_msg(sb, KERN_ERR, "filesystem has both journal " - "and inode journals!"); - return -EINVAL; - } - - if (journal_inum) { - if (!(journal = ext4_get_journal(sb, journal_inum))) - return -EINVAL; - } else { - if (!(journal = ext4_get_dev_journal(sb, journal_dev))) - return -EINVAL; - } - if (!(journal->j_flags & JBD2_BARRIER)) ext4_msg(sb, KERN_INFO, "barriers disabled"); @@ -5155,12 +5213,16 @@ static int ext4_load_journal(struct super_block *sb, if (err) { ext4_msg(sb, KERN_ERR, "error loading journal"); - jbd2_journal_destroy(journal); - return err; + goto err_out; } EXT4_SB(sb)->s_journal = journal; - ext4_clear_journal_err(sb, es); + err = ext4_clear_journal_err(sb, es); + if (err) { + EXT4_SB(sb)->s_journal = NULL; + jbd2_journal_destroy(journal); + return err; + } if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { @@ -5171,6 +5233,10 @@ static int ext4_load_journal(struct super_block *sb, } return 0; + +err_out: + jbd2_journal_destroy(journal); + return err; } static int ext4_commit_super(struct super_block *sb, int sync) @@ -5182,13 +5248,6 @@ static int ext4_commit_super(struct super_block *sb, int sync) if (!sbh || block_device_ejected(sb)) return error; - /* - * The superblock bh should be mapped, but it might not be if the - * device was hot-removed. Not much we can do but fail the I/O. - */ - if (!buffer_mapped(sbh)) - return error; - /* * If the file system is mounted read-only, don't update the * superblock write time. This avoids updating the superblock @@ -5256,26 +5315,32 @@ static int ext4_commit_super(struct super_block *sb, int sync) * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. */ -static void ext4_mark_recovery_complete(struct super_block *sb, - struct ext4_super_block *es) +static int ext4_mark_recovery_complete(struct super_block *sb, + struct ext4_super_block *es) { + int err; journal_t *journal = EXT4_SB(sb)->s_journal; if (!ext4_has_feature_journal(sb)) { - BUG_ON(journal != NULL); - return; + if (journal != NULL) { + ext4_error(sb, "Journal got removed while the fs was " + "mounted!"); + return -EFSCORRUPTED; + } + return 0; } jbd2_journal_lock_updates(journal); - if (jbd2_journal_flush(journal) < 0) + err = jbd2_journal_flush(journal); + if (err < 0) goto out; if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) { ext4_clear_feature_journal_needs_recovery(sb); ext4_commit_super(sb, 1); } - out: jbd2_journal_unlock_updates(journal); + return err; } /* @@ -5283,14 +5348,17 @@ static void ext4_mark_recovery_complete(struct super_block *sb, * has recorded an error from a previous lifetime, move that error to the * main filesystem now. 
*/ -static void ext4_clear_journal_err(struct super_block *sb, +static int ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es) { journal_t *journal; int j_errno; const char *errstr; - BUG_ON(!ext4_has_feature_journal(sb)); + if (!ext4_has_feature_journal(sb)) { + ext4_error(sb, "Journal got removed while the fs was mounted!"); + return -EFSCORRUPTED; + } journal = EXT4_SB(sb)->s_journal; @@ -5315,6 +5383,7 @@ static void ext4_clear_journal_err(struct super_block *sb, jbd2_journal_clear_err(journal); jbd2_journal_update_sb_errno(journal); } + return 0; } /* @@ -5457,7 +5526,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) { struct ext4_super_block *es; struct ext4_sb_info *sbi = EXT4_SB(sb); - unsigned long old_sb_flags; + unsigned long old_sb_flags, vfs_flags; struct ext4_mount_options old_opts; int enable_quota = 0; ext4_group_t g; @@ -5500,6 +5569,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if (sbi->s_journal && sbi->s_journal->j_task->io_context) journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; + /* + * Some options can be enabled by ext4 and/or by VFS mount flag + * either way we need to make sure it matches in both *flags and + * s_flags. Copy those selected flags from *flags to s_flags + */ + vfs_flags = SB_LAZYTIME | SB_I_VERSION; + sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags); + if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { err = -EINVAL; goto restore_opts; @@ -5553,9 +5630,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); } - if (*flags & SB_LAZYTIME) - sb->s_flags |= SB_LAZYTIME; - if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) { if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) { err = -EROFS; @@ -5585,8 +5659,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) (sbi->s_mount_state & EXT4_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); - if (sbi->s_journal) + if (sbi->s_journal) { + /* + * We let remount-ro finish even if marking fs + * as clean failed... + */ ext4_mark_recovery_complete(sb, es); + } if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); } else { @@ -5628,14 +5707,25 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } + /* + * Update the original bdev mapping's wb_err value + * which could be used to detect the metadata async + * write error. + */ + errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, + &sbi->s_bdev_wb_err); + /* * Mounting a RDONLY partition read-write, so reread * and store the current valid flag. (It may have * been changed by e2fsck since we originally mounted * the partition.) */ - if (sbi->s_journal) - ext4_clear_journal_err(sb, es); + if (sbi->s_journal) { + err = ext4_clear_journal_err(sb, es); + if (err) + goto restore_opts; + } sbi->s_mount_state = le16_to_cpu(es->s_state); err = ext4_setup_super(sb, es, 0); @@ -5665,7 +5755,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) ext4_register_li_request(sb, first_not_zeroed); } - ext4_setup_system_zone(sb); + /* + * Handle creation of system zone data early because it can fail. + * Releasing of existing data is done when we are sure remount will + * succeed. 
+ */ + if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) { + err = ext4_setup_system_zone(sb); + if (err) + goto restore_opts; + } + if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) { err = ext4_commit_super(sb, 1); if (err) @@ -5686,8 +5786,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } } #endif + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + ext4_release_system_zone(sb); + + /* + * Some options can be enabled by ext4 and/or by VFS mount flag + * either way we need to make sure it matches in both *flags and + * s_flags. Copy those selected flags from s_flags to *flags + */ + *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags); - *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data); kfree(orig_data); return 0; @@ -5701,6 +5809,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) sbi->s_commit_interval = old_opts.s_commit_interval; sbi->s_min_batch_time = old_opts.s_min_batch_time; sbi->s_max_batch_time = old_opts.s_max_batch_time; + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + ext4_release_system_zone(sb); #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < EXT4_MAXQUOTAS; i++) { diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index 6c9fc9e21c138aded7e3d1fad09349aca1f998e8..bfabb799fa451a429f4a52d8d62311c690b567ce 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -189,6 +189,9 @@ static struct ext4_attr ext4_attr_##_name = { \ #define EXT4_RW_ATTR_SBI_UL(_name,_elname) \ EXT4_ATTR_OFFSET(_name, 0644, pointer_ul, ext4_sb_info, _elname) +#define EXT4_RO_ATTR_SBI_ATOMIC(_name,_elname) \ + EXT4_ATTR_OFFSET(_name, 0444, pointer_atomic, ext4_sb_info, _elname) + #define EXT4_ATTR_PTR(_name,_mode,_id,_ptr) \ static struct ext4_attr ext4_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ @@ -215,6 +218,7 @@ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan); EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs); EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); +EXT4_RW_ATTR_SBI_UI(mb_max_inode_prealloc, s_mb_max_inode_prealloc); EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb); EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error); EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval); @@ -226,6 +230,8 @@ EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst); #ifdef CONFIG_EXT4_DEBUG EXT4_RW_ATTR_SBI_UL(simulate_fail, s_simulate_fail); #endif +EXT4_RO_ATTR_SBI_ATOMIC(warning_count, s_warning_count); +EXT4_RO_ATTR_SBI_ATOMIC(msg_count, s_msg_count); EXT4_RO_ATTR_ES_UI(errors_count, s_error_count); EXT4_RO_ATTR_ES_U8(first_error_errcode, s_first_error_errcode); EXT4_RO_ATTR_ES_U8(last_error_errcode, s_last_error_errcode); @@ -240,6 +246,8 @@ EXT4_RO_ATTR_ES_STRING(last_error_func, s_last_error_func, 32); EXT4_ATTR(first_error_time, 0444, first_error_time); EXT4_ATTR(last_error_time, 0444, last_error_time); EXT4_ATTR(journal_task, 0444, journal_task); +EXT4_RW_ATTR_SBI_UI(mb_prefetch, s_mb_prefetch); +EXT4_RW_ATTR_SBI_UI(mb_prefetch_limit, s_mb_prefetch_limit); static unsigned int old_bump_val = 128; EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val); @@ -257,6 +265,7 @@ static struct attribute *ext4_attrs[] = { ATTR_LIST(mb_order2_req), ATTR_LIST(mb_stream_req), ATTR_LIST(mb_group_prealloc), + ATTR_LIST(mb_max_inode_prealloc), 
ATTR_LIST(max_writeback_mb_bump), ATTR_LIST(extent_max_zeroout_kb), ATTR_LIST(trigger_fs_error), @@ -267,6 +276,8 @@ static struct attribute *ext4_attrs[] = { ATTR_LIST(msg_ratelimit_interval_ms), ATTR_LIST(msg_ratelimit_burst), ATTR_LIST(errors_count), + ATTR_LIST(warning_count), + ATTR_LIST(msg_count), ATTR_LIST(first_error_ino), ATTR_LIST(last_error_ino), ATTR_LIST(first_error_block), @@ -283,6 +294,8 @@ static struct attribute *ext4_attrs[] = { #ifdef CONFIG_EXT4_DEBUG ATTR_LIST(simulate_fail), #endif + ATTR_LIST(mb_prefetch), + ATTR_LIST(mb_prefetch_limit), NULL, }; ATTRIBUTE_GROUPS(ext4); diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 7d2f6576d954400700221f42bc74475cb48db1dc..cba4b877c606bb2ef24a1abf32b2bb4aba652eae 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1356,8 +1356,7 @@ static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode, block = 0; while (wsize < bufsize) { - if (bh != NULL) - brelse(bh); + brelse(bh); csize = (bufsize - wsize) > blocksize ? blocksize : bufsize - wsize; bh = ext4_getblk(handle, ea_inode, block, 0); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index ed2bca0fce9224bba95b8f1ab315e68038f29882..73683e58a08d51ed4a0e64a2c51812653ade0542 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -3550,6 +3550,9 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter, unsigned long align = offset | iov_iter_alignment(iter); struct block_device *bdev = inode->i_sb->s_bdev; + if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode)) + return 1; + if (align & blocksize_mask) { if (bdev) blkbits = blksize_bits(bdev_logical_block_size(bdev)); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 16322ea5b46305b2e1010cec827f0aa19b4ffb73..d9e52a7f3702fb06c5bf562cf09f154896c1d97b 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -2646,7 +2646,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode, case FI_NEW_INODE: if (set) return; - /* fall through */ + fallthrough; case FI_DATA_EXIST: case FI_INLINE_DOTS: case FI_PIN_FILE: diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 9bbaa2614679f84d41062601bdbf0428b8fca21a..cb1b5b61a1dab47f31a6c03c52c4a1d49d02f1b0 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -618,10 +618,10 @@ pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs) switch (dn->max_level) { case 3: base += 2 * indirect_blks; - /* fall through */ + fallthrough; case 2: base += 2 * direct_blks; - /* fall through */ + fallthrough; case 1: base += direct_index; break; @@ -2373,6 +2373,9 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, if (unlikely(nid >= nm_i->max_nid)) nid = 0; + if (unlikely(nid % NAT_ENTRY_PER_BLOCK)) + nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK; + /* Enough entries */ if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) return 0; diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index a65d357f89a9fad7ec74d7716aea268e702c3f4a..e247a5ef3713fb5a1308792fccaec8373ff058f0 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -799,7 +799,7 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, if (__is_large_section(sbi)) { unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); - unsigned short valid_blocks = + block_t valid_blocks = get_valid_blocks(sbi, segno, true); f2fs_bug_on(sbi, unlikely(!valid_blocks || @@ -815,7 +815,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, enum dirty_type dirty_type) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); - unsigned short 
valid_blocks; + block_t valid_blocks; if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type])) dirty_i->nr_dirty[dirty_type]--; @@ -4316,8 +4316,8 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi) struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct free_segmap_info *free_i = FREE_I(sbi); unsigned int segno = 0, offset = 0, secno; - unsigned short valid_blocks; - unsigned short blks_per_sec = BLKS_PER_SEC(sbi); + block_t valid_blocks; + block_t blks_per_sec = BLKS_PER_SEC(sbi); while (1) { /* find dirty segment based on free segmap */ diff --git a/fs/fcntl.c b/fs/fcntl.c index 2e4c0fa2074b00c7b4ea43715647368bf680d804..19ac5baad50fdcadd886431dece771fcec4efef7 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -362,7 +362,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, case F_OFD_SETLK: case F_OFD_SETLKW: #endif - /* Fallthrough */ + fallthrough; case F_SETLK: case F_SETLKW: if (copy_from_user(&flock, argp, sizeof(flock))) @@ -771,7 +771,7 @@ static void send_sigio_to_task(struct task_struct *p, if (!do_send_sig_info(signum, &si, p, type)) break; } - /* fall-through - fall back on the old plain SIGIO signal */ + fallthrough; /* fall back on the old plain SIGIO signal */ case 0: do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type); } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index a605c3dddabc76b3c013870b3246b0ddc15af9fa..149227160ff0b07f7b14594926178a5e8fb8215f 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -42,7 +42,6 @@ struct wb_writeback_work { long nr_pages; struct super_block *sb; - unsigned long *older_than_this; enum writeback_sync_modes sync_mode; unsigned int tagged_writepages:1; unsigned int for_kupdate:1; @@ -144,7 +143,9 @@ static void inode_io_list_del_locked(struct inode *inode, struct bdi_writeback *wb) { assert_spin_locked(&wb->list_lock); + assert_spin_locked(&inode->i_lock); + inode->i_state &= ~I_SYNC_QUEUED; list_del_init(&inode->i_io_list); wb_io_lists_depopulated(wb); } @@ -1122,7 +1123,9 @@ void inode_io_list_del(struct inode *inode) struct bdi_writeback *wb; wb = inode_to_wb_and_lock_list(inode); + spin_lock(&inode->i_lock); inode_io_list_del_locked(inode, wb); + spin_unlock(&inode->i_lock); spin_unlock(&wb->list_lock); } EXPORT_SYMBOL(inode_io_list_del); @@ -1172,8 +1175,10 @@ void sb_clear_inode_writeback(struct inode *inode) * the case then the inode must have been redirtied while it was being written * out and we don't reset its dirtied_when. */ -static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) +static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) { + assert_spin_locked(&inode->i_lock); + if (!list_empty(&wb->b_dirty)) { struct inode *tail; @@ -1182,6 +1187,14 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) inode->dirtied_when = jiffies; } inode_io_list_move_locked(inode, wb, &wb->b_dirty); + inode->i_state &= ~I_SYNC_QUEUED; +} + +static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) +{ + spin_lock(&inode->i_lock); + redirty_tail_locked(inode, wb); + spin_unlock(&inode->i_lock); } /* @@ -1220,16 +1233,13 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) #define EXPIRE_DIRTY_ATIME 0x0001 /* - * Move expired (dirtied before work->older_than_this) dirty inodes from + * Move expired (dirtied before dirtied_before) dirty inodes from * @delaying_queue to @dispatch_queue. 
*/ static int move_expired_inodes(struct list_head *delaying_queue, struct list_head *dispatch_queue, - int flags, - struct wb_writeback_work *work) + unsigned long dirtied_before) { - unsigned long *older_than_this = NULL; - unsigned long expire_time; LIST_HEAD(tmp); struct list_head *pos, *node; struct super_block *sb = NULL; @@ -1237,21 +1247,15 @@ static int move_expired_inodes(struct list_head *delaying_queue, int do_sb_sort = 0; int moved = 0; - if ((flags & EXPIRE_DIRTY_ATIME) == 0) - older_than_this = work->older_than_this; - else if (!work->for_sync) { - expire_time = jiffies - (dirtytime_expire_interval * HZ); - older_than_this = &expire_time; - } while (!list_empty(delaying_queue)) { inode = wb_inode(delaying_queue->prev); - if (older_than_this && - inode_dirtied_after(inode, *older_than_this)) + if (inode_dirtied_after(inode, dirtied_before)) break; list_move(&inode->i_io_list, &tmp); moved++; - if (flags & EXPIRE_DIRTY_ATIME) - set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state); + spin_lock(&inode->i_lock); + inode->i_state |= I_SYNC_QUEUED; + spin_unlock(&inode->i_lock); if (sb_is_blkdev_sb(inode->i_sb)) continue; if (sb && sb != inode->i_sb) @@ -1289,18 +1293,22 @@ static int move_expired_inodes(struct list_head *delaying_queue, * | * +--> dequeue for IO */ -static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) +static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, + unsigned long dirtied_before) { int moved; + unsigned long time_expire_jif = dirtied_before; assert_spin_locked(&wb->list_lock); list_splice_init(&wb->b_more_io, &wb->b_io); - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before); + if (!work->for_sync) + time_expire_jif = jiffies - dirtytime_expire_interval * HZ; moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, - EXPIRE_DIRTY_ATIME, work); + time_expire_jif); if (moved) wb_io_lists_populated(wb); - trace_writeback_queue_io(wb, work, moved); + trace_writeback_queue_io(wb, work, dirtied_before, moved); } static int write_inode(struct inode *inode, struct writeback_control *wbc) @@ -1394,7 +1402,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * writeback is not making progress due to locked * buffers. Skip this inode for now. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); return; } @@ -1414,7 +1422,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * retrying writeback of the dirty page/inode * that cannot be performed immediately. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); } } else if (inode->i_state & I_DIRTY) { /* @@ -1422,10 +1430,11 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * such as delayed allocation during submission or metadata * updates after data IO completion. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); } else if (inode->i_state & I_DIRTY_TIME) { inode->dirtied_when = jiffies; inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); + inode->i_state &= ~I_SYNC_QUEUED; } else { /* The inode is clean. Remove from writeback lists. 
*/ inode_io_list_del_locked(inode, wb); @@ -1472,18 +1481,14 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) spin_lock(&inode->i_lock); dirty = inode->i_state & I_DIRTY; - if (inode->i_state & I_DIRTY_TIME) { - if ((dirty & I_DIRTY_INODE) || - wbc->sync_mode == WB_SYNC_ALL || - unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) || - unlikely(time_after(jiffies, - (inode->dirtied_time_when + - dirtytime_expire_interval * HZ)))) { - dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; - trace_writeback_lazytime(inode); - } - } else - inode->i_state &= ~I_DIRTY_TIME_EXPIRED; + if ((inode->i_state & I_DIRTY_TIME) && + ((dirty & I_DIRTY_INODE) || + wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync || + time_after(jiffies, inode->dirtied_time_when + + dirtytime_expire_interval * HZ))) { + dirty |= I_DIRTY_TIME; + trace_writeback_lazytime(inode); + } inode->i_state &= ~dirty; /* @@ -1669,8 +1674,8 @@ static long writeback_sb_inodes(struct super_block *sb, */ spin_lock(&inode->i_lock); if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { + redirty_tail_locked(inode, wb); spin_unlock(&inode->i_lock); - redirty_tail(inode, wb); continue; } if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { @@ -1811,7 +1816,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, blk_start_plug(&plug); spin_lock(&wb->list_lock); if (list_empty(&wb->b_io)) - queue_io(wb, &work); + queue_io(wb, &work, jiffies); __writeback_inodes_wb(wb, &work); spin_unlock(&wb->list_lock); blk_finish_plug(&plug); @@ -1831,7 +1836,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, * takes longer than a dirty_writeback_interval interval, then leave a * one-second gap. * - * older_than_this takes precedence over nr_to_write. So we'll only write back + * dirtied_before takes precedence over nr_to_write. So we'll only write back * all dirty pages if they are all attached to "old" mappings. */ static long wb_writeback(struct bdi_writeback *wb, @@ -1839,14 +1844,11 @@ static long wb_writeback(struct bdi_writeback *wb, { unsigned long wb_start = jiffies; long nr_pages = work->nr_pages; - unsigned long oldest_jif; + unsigned long dirtied_before = jiffies; struct inode *inode; long progress; struct blk_plug plug; - oldest_jif = jiffies; - work->older_than_this = &oldest_jif; - blk_start_plug(&plug); spin_lock(&wb->list_lock); for (;;) { @@ -1880,14 +1882,14 @@ static long wb_writeback(struct bdi_writeback *wb, * safe. */ if (work->for_kupdate) { - oldest_jif = jiffies - + dirtied_before = jiffies - msecs_to_jiffies(dirty_expire_interval * 10); } else if (work->for_background) - oldest_jif = jiffies; + dirtied_before = jiffies; trace_writeback_start(wb, work); if (list_empty(&wb->b_io)) - queue_io(wb, work); + queue_io(wb, work, dirtied_before); if (work->sb) progress = writeback_sb_inodes(work->sb, wb, work); else @@ -2289,11 +2291,12 @@ void __mark_inode_dirty(struct inode *inode, int flags) inode->i_state |= flags; /* - * If the inode is being synced, just update its dirty state. - * The unlocker will place the inode on the appropriate - * superblock list, based upon its state. + * If the inode is queued for writeback by flush worker, just + * update its dirty state. Once the flush worker is done with + * the inode it will place it on the appropriate superblock + * list, based upon its state. 
 		 */
-		if (inode->i_state & I_SYNC)
+		if (inode->i_state & I_SYNC_QUEUED)
 			goto out_unlock_inode;
 
 		/*
diff --git a/fs/fs_context.c b/fs/fs_context.c
index 7d5c5dd2b1d5af34559dfcca079089aa93ddec37..2834d1afa6e8053e93c6e78122be8d58107a36c9 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -521,7 +521,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	switch (param->type) {
 	case fs_value_is_string:
 		len = 1 + param->size;
-		/* Fall through */
+		fallthrough;
 	case fs_value_is_flag:
 		len += strlen(param->key);
 		break;
diff --git a/fs/fsopen.c b/fs/fsopen.c
index 2fa3f241b76211e2d0d7ba5e6abd18c5f2d21f26..27a890aa493abf89960c374eaeab31192b662d73 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -412,7 +412,7 @@ SYSCALL_DEFINE5(fsconfig,
 		break;
 	case FSCONFIG_SET_PATH_EMPTY:
 		lookup_flags = LOOKUP_EMPTY;
-		/* fallthru */
+		fallthrough;
 	case FSCONFIG_SET_PATH:
 		param.type = fs_value_is_filename;
 		param.name = getname_flags(_value, lookup_flags, NULL);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 770f3a720db9a44f853c95fe4b7985505f7d83ad..0f69fbd4af664e55fde37b93f6aa83c55aa501e2 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -746,7 +746,7 @@ static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
 		}
 		if (n == 0)
 			break;
-		/* fall through - To branching from existing tree */
+		fallthrough;	/* To branching from existing tree */
 	case ALLOC_GROW_DEPTH:
 		if (i > 1 && i < mp->mp_fheight)
 			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
@@ -757,7 +757,7 @@ static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
 			state = ALLOC_DATA;
 		if (n == 0)
 			break;
-		/* fall through - To tree complete, adding data blocks */
+		fallthrough;	/* To tree complete, adding data blocks */
 	case ALLOC_DATA:
 		BUG_ON(n > dblks);
 		BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
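A side note on a pattern that recurs throughout this series (fs_context.c, fsopen.c and gfs2/bmap.c just above, and several earlier hunks): bare /* fall through */ comments are replaced with the fallthrough pseudo-keyword. A minimal stand-alone sketch of what that buys; the guarded #define below mirrors the spirit of the kernel's linux/compiler_attributes.h definition, and is included only so the sketch builds outside the kernel.

#include <stdio.h>

#ifndef fallthrough
# if defined(__GNUC__) && __GNUC__ >= 7
#  define fallthrough __attribute__((__fallthrough__))
# else
#  define fallthrough do {} while (0)
# endif
#endif

static int decode(int state)
{
	int steps = 0;

	switch (state) {
	case 2:
		steps++;
		fallthrough;	/* formerly a "fall through" comment */
	case 1:
		steps++;
		break;
	default:
		break;
	}
	return steps;
}

int main(void)
{
	printf("state 2 takes %d steps\n", decode(2));	/* prints 2 */
	return 0;
}

With the attribute in place, -Wimplicit-fallthrough can flag genuinely forgotten breaks while staying quiet on intentional drop-throughs.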
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index a58333e3980df804478a6c0b3d8e05485338e59f..3763c9ff1406bdbe102db85e2f5a68bf166e5c09 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -901,6 +901,36 @@ static void empty_ail1_list(struct gfs2_sbd *sdp)
 	}
 }
 
+/**
+ * trans_drain - drain the buf and databuf queues for a failed transaction
+ * @tr: the transaction to drain
+ *
+ * When this is called, we're taking an error exit for a log write that failed
+ * but since we bypassed the after_commit functions, we need to remove the
+ * items from the buf and databuf queue.
+ */
+static void trans_drain(struct gfs2_trans *tr)
+{
+	struct gfs2_bufdata *bd;
+	struct list_head *head;
+
+	if (!tr)
+		return;
+
+	head = &tr->tr_buf;
+	while (!list_empty(head)) {
+		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
+		list_del_init(&bd->bd_list);
+		kmem_cache_free(gfs2_bufdata_cachep, bd);
+	}
+	head = &tr->tr_databuf;
+	while (!list_empty(head)) {
+		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
+		list_del_init(&bd->bd_list);
+		kmem_cache_free(gfs2_bufdata_cachep, bd);
+	}
+}
+
 /**
  * gfs2_log_flush - flush incore transaction(s)
  * @sdp: the filesystem
@@ -1005,6 +1035,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 
 out:
 	if (gfs2_withdrawn(sdp)) {
+		trans_drain(tr);
 		/**
 		 * If the tr_list is empty, we're withdrawing during a log
 		 * flush that targets a transaction, but the transaction was
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 4b67d47a7e00467b087a53f3d44cb15bffee9248..6e173ae378c44fcc3b9b3050182801e6f92824a3 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1599,7 +1599,7 @@ static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
 	case GFS2_QUOTA_ON:
 		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
 		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
-		/*FALLTHRU*/
+		fallthrough;
 	case GFS2_QUOTA_ACCOUNT:
 		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
 						  QCI_SYSFILE;
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index e1c7eb6eb00a4018b5e97495b3aa93bdc13a2ab8..6d4bf7ea7b3be66d3fcdcb51dd0722d5d35826b3 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -67,6 +67,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
 	tr->tr_reserved += gfs2_struct2blk(sdp, revokes);
 	INIT_LIST_HEAD(&tr->tr_databuf);
 	INIT_LIST_HEAD(&tr->tr_buf);
+	INIT_LIST_HEAD(&tr->tr_list);
 	INIT_LIST_HEAD(&tr->tr_ail1_list);
 	INIT_LIST_HEAD(&tr->tr_ail2_list);
 
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 61eec628805de23af7d29958aea0f08016f8fbfd..0350dc7821bf951e367da0f0bee090f8fb6a50ce 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -195,7 +195,7 @@ int hfsplus_read_wrapper(struct super_block *sb)
 	switch (sbi->s_vhdr->signature) {
 	case cpu_to_be16(HFSPLUS_VOLHEAD_SIGX):
 		set_bit(HFSPLUS_SB_HFSX, &sbi->flags);
-		/*FALLTHRU*/
+		fallthrough;
 	case cpu_to_be16(HFSPLUS_VOLHEAD_SIG):
 		break;
 	case cpu_to_be16(HFSP_WRAP_MAGIC):
diff --git a/fs/io-wq.c b/fs/io-wq.c
index e92c4724480cadc8756360802ee9d44e748c0f32..414beb5438836cf4c28935bbdfd376e74078e19f 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -925,6 +925,24 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 	return match->nr_running && !match->cancel_all;
 }
 
+static inline void io_wqe_remove_pending(struct io_wqe *wqe,
+					 struct io_wq_work *work,
+					 struct io_wq_work_node *prev)
+{
+	unsigned int hash = io_get_work_hash(work);
+	struct io_wq_work *prev_work = NULL;
+
+	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
+		if (prev)
+			prev_work = container_of(prev, struct io_wq_work, list);
+		if (prev_work && io_get_work_hash(prev_work) == hash)
+			wqe->hash_tail[hash] = prev_work;
+		else
+			wqe->hash_tail[hash] = NULL;
+	}
+	wq_list_del(&wqe->work_list, &work->list, prev);
+}
+
 static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
 				       struct io_cb_cancel_data *match)
 {
@@ -938,8 +956,7 @@ static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
 		work = container_of(node, struct io_wq_work, list);
 		if (!match->fn(work, match->data))
 			continue;
-
-		wq_list_del(&wqe->work_list, node, prev);
+ io_wqe_remove_pending(wqe, work, prev); spin_unlock_irqrestore(&wqe->lock, flags); io_run_cancel(work, wqe); match->nr_pending++; diff --git a/fs/io_uring.c b/fs/io_uring.c index dc506b75659c5c2242561b6767caa01974196698..3790c7fe9fee2253b2a608015b099c05c0b0f8ae 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -540,7 +540,6 @@ enum { REQ_F_ISREG_BIT, REQ_F_COMP_LOCKED_BIT, REQ_F_NEED_CLEANUP_BIT, - REQ_F_OVERFLOW_BIT, REQ_F_POLLED_BIT, REQ_F_BUFFER_SELECTED_BIT, REQ_F_NO_FILE_TABLE_BIT, @@ -583,8 +582,6 @@ enum { REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT), /* needs cleanup */ REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT), - /* in overflow list */ - REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT), /* already went through poll handler */ REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), /* buffer already selected */ @@ -946,7 +943,8 @@ static void io_get_req_task(struct io_kiocb *req) static inline void io_clean_op(struct io_kiocb *req) { - if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED)) + if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED | + REQ_F_INFLIGHT)) __io_clean_op(req); } @@ -1152,7 +1150,7 @@ static void io_prep_async_work(struct io_kiocb *req) io_req_init_async(req); if (req->flags & REQ_F_ISREG) { - if (def->hash_reg_file) + if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL)) io_wq_hash_work(&req->work, file_inode(req->file)); } else { if (def->unbound_nonreg_file) @@ -1366,7 +1364,6 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb, compl.list); list_move(&req->compl.list, &list); - req->flags &= ~REQ_F_OVERFLOW; if (cqe) { WRITE_ONCE(cqe->user_data, req->user_data); WRITE_ONCE(cqe->res, req->result); @@ -1419,7 +1416,6 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags) ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW; } io_clean_op(req); - req->flags |= REQ_F_OVERFLOW; req->result = res; req->compl.cflags = cflags; refcount_inc(&req->refs); @@ -1563,17 +1559,6 @@ static bool io_dismantle_req(struct io_kiocb *req) if (req->file) io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE)); - if (req->flags & REQ_F_INFLIGHT) { - struct io_ring_ctx *ctx = req->ctx; - unsigned long flags; - - spin_lock_irqsave(&ctx->inflight_lock, flags); - list_del(&req->inflight_entry); - if (waitqueue_active(&ctx->inflight_wait)) - wake_up(&ctx->inflight_wait); - spin_unlock_irqrestore(&ctx->inflight_lock, flags); - } - return io_req_clean_work(req); } @@ -1761,7 +1746,8 @@ static struct io_kiocb *io_req_find_next(struct io_kiocb *req) return __io_req_find_next(req); } -static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb) +static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb, + bool twa_signal_ok) { struct task_struct *tsk = req->task; struct io_ring_ctx *ctx = req->ctx; @@ -1774,7 +1760,7 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb) * will do the job. 
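
The io_wqe_remove_pending() helper above fixes cancellation of hashed work: the pending queue is singly linked, so unlinking needs the predecessor, and the cached tail pointer of the hash chain must be repointed when the tail itself is removed. A standalone model of that bookkeeping (all names and types here are illustrative, not the io-wq ones, and every node in the toy is hashed):

#include <stddef.h>

struct wnode { struct wnode *next; unsigned int hash; };

struct queue {
	struct wnode *head;
	struct wnode *hash_tail[4];	/* last queued node per hash bucket */
};

static void remove_pending(struct queue *q, struct wnode *w, struct wnode *prev)
{
	/* repair the cached chain tail before unlinking */
	if (q->hash_tail[w->hash] == w)
		q->hash_tail[w->hash] =
			(prev && prev->hash == w->hash) ? prev : NULL;

	if (prev)
		prev->next = w->next;
	else
		q->head = w->next;
	w->next = NULL;
}

Without the tail fix-up, the next hashed submission would be chained behind a node that is no longer on the list.
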
*/ notify = 0; - if (!(ctx->flags & IORING_SETUP_SQPOLL)) + if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok) notify = TWA_SIGNAL; ret = task_work_add(tsk, cb, notify); @@ -1834,7 +1820,7 @@ static void io_req_task_queue(struct io_kiocb *req) init_task_work(&req->task_work, io_req_task_submit); percpu_ref_get(&req->ctx->refs); - ret = io_req_task_work_add(req, &req->task_work); + ret = io_req_task_work_add(req, &req->task_work, true); if (unlikely(ret)) { struct task_struct *tsk; @@ -2063,6 +2049,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, req = list_first_entry(done, struct io_kiocb, inflight_entry); if (READ_ONCE(req->result) == -EAGAIN) { + req->result = 0; req->iopoll_completed = 0; list_move_tail(&req->inflight_entry, &again); continue; @@ -2308,38 +2295,27 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error) io_req_complete(req, ret); return false; } - -static void io_rw_resubmit(struct callback_head *cb) -{ - struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); - struct io_ring_ctx *ctx = req->ctx; - int err; - - err = io_sq_thread_acquire_mm(ctx, req); - - if (io_resubmit_prep(req, err)) { - refcount_inc(&req->refs); - io_queue_async_work(req); - } - - percpu_ref_put(&ctx->refs); -} #endif static bool io_rw_reissue(struct io_kiocb *req, long res) { #ifdef CONFIG_BLOCK + umode_t mode = file_inode(req->file)->i_mode; int ret; + if (!S_ISBLK(mode) && !S_ISREG(mode)) + return false; if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker()) return false; - init_task_work(&req->task_work, io_rw_resubmit); - percpu_ref_get(&req->ctx->refs); + ret = io_sq_thread_acquire_mm(req->ctx, req); - ret = io_req_task_work_add(req, &req->task_work); - if (!ret) + if (io_resubmit_prep(req, ret)) { + refcount_inc(&req->refs); + io_queue_async_work(req); return true; + } + #endif return false; } @@ -2578,7 +2554,7 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) * IO with EINTR. 
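
The reworked io_rw_reissue() above refuses to reissue an -EAGAIN result unless the target is a regular file or a block device; other file types, such as sockets and pipes, can legitimately return -EAGAIN indefinitely. A small userspace illustration of the same mode check, using fstat() in place of file_inode():

#include <stdbool.h>
#include <sys/stat.h>

static bool can_reissue(int fd)
{
	struct stat st;

	if (fstat(fd, &st) != 0)
		return false;
	/* only regular files and block devices get the retry */
	return S_ISREG(st.st_mode) || S_ISBLK(st.st_mode);
}
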
*/ ret = -EINTR; - /* fall through */ + fallthrough; default: kiocb->ki_complete(kiocb, ret, 0); } @@ -2819,22 +2795,15 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, return __io_iov_buffer_select(req, iov, needs_lock); } -static ssize_t io_import_iovec(int rw, struct io_kiocb *req, - struct iovec **iovec, struct iov_iter *iter, - bool needs_lock) +static ssize_t __io_import_iovec(int rw, struct io_kiocb *req, + struct iovec **iovec, struct iov_iter *iter, + bool needs_lock) { void __user *buf = u64_to_user_ptr(req->rw.addr); size_t sqe_len = req->rw.len; ssize_t ret; u8 opcode; - if (req->io) { - struct io_async_rw *iorw = &req->io->rw; - - *iovec = NULL; - return iov_iter_count(&iorw->iter); - } - opcode = req->opcode; if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { *iovec = NULL; @@ -2848,10 +2817,8 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req, if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) { if (req->flags & REQ_F_BUFFER_SELECT) { buf = io_rw_buffer_select(req, &sqe_len, needs_lock); - if (IS_ERR(buf)) { - *iovec = NULL; + if (IS_ERR(buf)) return PTR_ERR(buf); - } req->rw.len = sqe_len; } @@ -2879,6 +2846,21 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req, return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter); } +static ssize_t io_import_iovec(int rw, struct io_kiocb *req, + struct iovec **iovec, struct iov_iter *iter, + bool needs_lock) +{ + if (!req->io) + return __io_import_iovec(rw, req, iovec, iter, needs_lock); + *iovec = NULL; + return iov_iter_count(&req->io->rw.iter); +} + +static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) +{ + return kiocb->ki_filp->f_mode & FMODE_STREAM ? NULL : &kiocb->ki_pos; +} + /* * For files that don't have ->read_iter() and ->write_iter(), handle them * by looping over ->read() or ->write() manually. 
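
io_kiocb_ppos(), added above, returns NULL for FMODE_STREAM files because streams such as pipes and sockets have no file position to honour or update. A loose userspace analogue of the same convention, where the caller passes NULL for stream-like descriptors and a position pointer for seekable ones:

#include <sys/types.h>
#include <unistd.h>

static ssize_t read_at(int fd, void *buf, size_t len, off_t *pos)
{
	ssize_t n;

	if (!pos)
		return read(fd, buf, len);	/* stream: no position */
	n = pread(fd, buf, len, *pos);
	if (n > 0)
		*pos += n;	/* mirror how ->read() advances *ppos */
	return n;
}
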
@@ -2914,10 +2896,10 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, if (rw == READ) { nr = file->f_op->read(file, iovec.iov_base, - iovec.iov_len, &kiocb->ki_pos); + iovec.iov_len, io_kiocb_ppos(kiocb)); } else { nr = file->f_op->write(file, iovec.iov_base, - iovec.iov_len, &kiocb->ki_pos); + iovec.iov_len, io_kiocb_ppos(kiocb)); } if (iov_iter_is_bvec(iter)) @@ -2998,17 +2980,15 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw, bool force_nonblock) { struct io_async_rw *iorw = &req->io->rw; + struct iovec *iov; ssize_t ret; - iorw->iter.iov = iorw->fast_iov; - /* reset ->io around the iovec import, we don't want to use it */ - req->io = NULL; - ret = io_import_iovec(rw, req, (struct iovec **) &iorw->iter.iov, - &iorw->iter, !force_nonblock); - req->io = container_of(iorw, struct io_async_ctx, rw); + iorw->iter.iov = iov = iorw->fast_iov; + ret = __io_import_iovec(rw, req, &iov, &iorw->iter, !force_nonblock); if (unlikely(ret < 0)) return ret; + iorw->iter.iov = iov; io_req_map_rw(req, iorw->iter.iov, iorw->fast_iov, &iorw->iter); return 0; } @@ -3061,7 +3041,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, /* submit ref gets dropped, acquire a new one */ refcount_inc(&req->refs); - ret = io_req_task_work_add(req, &req->task_work); + ret = io_req_task_work_add(req, &req->task_work, true); if (unlikely(ret)) { struct task_struct *tsk; @@ -3074,27 +3054,6 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, return 1; } -static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb, - struct wait_page_queue *wait, - wait_queue_func_t func, - void *data) -{ - /* Can't support async wakeup with polled IO */ - if (kiocb->ki_flags & IOCB_HIPRI) - return -EINVAL; - if (kiocb->ki_filp->f_mode & FMODE_BUF_RASYNC) { - wait->wait.func = func; - wait->wait.private = data; - wait->wait.flags = 0; - INIT_LIST_HEAD(&wait->wait.entry); - kiocb->ki_flags |= IOCB_WAITQ; - kiocb->ki_waitq = wait; - return 0; - } - - return -EOPNOTSUPP; -} - /* * This controls whether a given IO request should be armed for async page * based retry. 
If we return false here, the request is handed to the async @@ -3109,16 +3068,17 @@ static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb, */ static bool io_rw_should_retry(struct io_kiocb *req) { + struct wait_page_queue *wait = &req->io->rw.wpq; struct kiocb *kiocb = &req->rw.kiocb; - int ret; /* never retry for NOWAIT, we just complete with -EAGAIN */ if (req->flags & REQ_F_NOWAIT) return false; /* Only for buffered IO */ - if (kiocb->ki_flags & IOCB_DIRECT) + if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI)) return false; + /* * just use poll if we can, and don't attempt if the fs doesn't * support callback based unlocks @@ -3126,14 +3086,15 @@ static bool io_rw_should_retry(struct io_kiocb *req) if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) return false; - ret = kiocb_wait_page_queue_init(kiocb, &req->io->rw.wpq, - io_async_buf_func, req); - if (!ret) { - io_get_req_task(req); - return true; - } + wait->wait.func = io_async_buf_func; + wait->wait.private = req; + wait->wait.flags = 0; + INIT_LIST_HEAD(&wait->wait.entry); + kiocb->ki_flags |= IOCB_WAITQ; + kiocb->ki_waitq = wait; - return false; + io_get_req_task(req); + return true; } static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) @@ -3161,6 +3122,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iov_count = iov_iter_count(iter); io_size = ret; req->result = io_size; ret = 0; @@ -3173,8 +3135,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, if (force_nonblock && !io_file_supports_async(req->file, READ)) goto copy_iov; - iov_count = iov_iter_count(iter); - ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count); + ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count); if (unlikely(ret)) goto out_free; @@ -3186,14 +3147,21 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, ret = 0; goto out_free; } else if (ret == -EAGAIN) { - if (!force_nonblock) + /* IOPOLL retry should happen for io-wq threads */ + if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) goto done; + /* no retry on NONBLOCK marked file */ + if (req->file->f_flags & O_NONBLOCK) + goto done; + /* some cases will consume bytes even on error returns */ + iov_iter_revert(iter, iov_count - iov_iter_count(iter)); ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); if (ret) goto out_free; return -EAGAIN; } else if (ret < 0) { - goto out_free; + /* make sure -ERESTARTSYS -> -EINTR is done */ + goto done; } /* read it all, or we did blocking attempt. no retry. 
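
The iov_iter_revert() calls added in io_read() and io_write() above handle backends that consume iterator bytes and then still fail with -EAGAIN: before the request is set up for an async retry, the iterator is rewound by exactly the amount consumed, computed as the difference between the starting and remaining byte counts. A self-contained sketch of that accounting; the iterator and the failing backend here are toys, not the kernel types:

#include <assert.h>
#include <stddef.h>

#define TOY_EAGAIN (-11)	/* stand-in for -EAGAIN */

struct toy_iter { const char *p; size_t count; };

static void toy_revert(struct toy_iter *it, size_t bytes)
{
	it->p -= bytes;
	it->count += bytes;
}

static int toy_read(struct toy_iter *it)
{
	size_t start = it->count;
	int ret;

	it->p += start / 2;	/* the backend consumed half the bytes... */
	it->count -= start / 2;
	ret = TOY_EAGAIN;	/* ...and then asked for a retry anyway */

	if (ret == TOY_EAGAIN)
		toy_revert(it, start - it->count);	/* undo the progress */
	assert(it->count == start);
	return ret;
}
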
*/ @@ -3238,6 +3206,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, kiocb_done(kiocb, ret, cs); ret = 0; out_free: + /* it's reportedly faster than delegating the null check to kfree() */ if (iovec) kfree(iovec); return ret; @@ -3276,6 +3245,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iov_count = iov_iter_count(iter); io_size = ret; req->result = io_size; @@ -3292,8 +3262,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, (req->flags & REQ_F_ISREG)) goto copy_iov; - iov_count = iov_iter_count(iter); - ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count); + ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count); if (unlikely(ret)) goto out_free; @@ -3325,15 +3294,25 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, */ if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) ret2 = -EAGAIN; + /* no retry on NONBLOCK marked file */ + if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK)) + goto done; if (!force_nonblock || ret2 != -EAGAIN) { + /* IOPOLL retry should happen for io-wq threads */ + if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) + goto copy_iov; +done: kiocb_done(kiocb, ret2, cs); } else { copy_iov: + /* some cases will consume bytes even on error returns */ + iov_iter_revert(iter, iov_count - iov_iter_count(iter)); ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); if (!ret) return -EAGAIN; } out_free: + /* it's reportedly faster than delegating the null check to kfree() */ if (iovec) kfree(iovec); return ret; @@ -4600,6 +4579,7 @@ struct io_poll_table { static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, __poll_t mask, task_work_func_t func) { + bool twa_signal_ok; int ret; /* for instances that support it check for an event match first: */ @@ -4614,13 +4594,21 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, init_task_work(&req->task_work, func); percpu_ref_get(&req->ctx->refs); + /* + * If we are using the signalfd wait_queue_head for this wakeup, then + * it's not safe to use TWA_SIGNAL as we could be recursing on the + * tsk->sighand->siglock on doing the wakeup. Should not be needed + * either, as the normal wakeup will suffice. + */ + twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh); + /* + * If this fails, then the task is exiting. When a task exits, the + * work gets canceled, so just cancel this request as well instead + * of executing it. We can't safely execute it anyway, as we may not + * have the state needed for it.
*/ - ret = io_req_task_work_add(req, &req->task_work); + ret = io_req_task_work_add(req, &req->task_work, twa_signal_ok); if (unlikely(ret)) { struct task_struct *tsk; @@ -4909,12 +4897,20 @@ static bool io_arm_poll_handler(struct io_kiocb *req) struct async_poll *apoll; struct io_poll_table ipt; __poll_t mask, ret; + int rw; if (!req->file || !file_can_poll(req->file)) return false; if (req->flags & REQ_F_POLLED) return false; - if (!def->pollin && !def->pollout) + if (def->pollin) + rw = READ; + else if (def->pollout) + rw = WRITE; + else + return false; + /* if we can't nonblock try, then no point in arming a poll handler */ + if (!io_file_supports_async(req->file, rw)) return false; apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); @@ -5653,6 +5649,18 @@ static void __io_clean_op(struct io_kiocb *req) } req->flags &= ~REQ_F_NEED_CLEANUP; } + + if (req->flags & REQ_F_INFLIGHT) { + struct io_ring_ctx *ctx = req->ctx; + unsigned long flags; + + spin_lock_irqsave(&ctx->inflight_lock, flags); + list_del(&req->inflight_entry); + if (waitqueue_active(&ctx->inflight_wait)) + wake_up(&ctx->inflight_wait); + spin_unlock_irqrestore(&ctx->inflight_lock, flags); + req->flags &= ~REQ_F_INFLIGHT; + } } static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, @@ -7327,7 +7335,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT]; index = i & IORING_FILE_TABLE_MASK; if (table->files[index]) { - file = io_file_from_index(ctx, index); + file = table->files[index]; err = io_queue_file_removal(data, file); if (err) break; @@ -7356,6 +7364,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, table->files[index] = file; err = io_sqe_file_register(ctx, file, i); if (err) { + table->files[index] = NULL; fput(file); break; } @@ -7455,9 +7464,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx, { int ret; - mmgrab(current->mm); - ctx->sqo_mm = current->mm; - if (ctx->flags & IORING_SETUP_SQPOLL) { ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) @@ -7502,10 +7508,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx, return 0; err: io_finish_async(ctx); - if (ctx->sqo_mm) { - mmdrop(ctx->sqo_mm); - ctx->sqo_mm = NULL; - } return ret; } @@ -7979,7 +7981,13 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) ACCT_LOCKED); INIT_WORK(&ctx->exit_work, io_ring_exit_work); - queue_work(system_wq, &ctx->exit_work); + /* + * Use system_unbound_wq to avoid spawning tons of event kworkers + * if we're exiting a ton of rings at the same time. It just adds + * noise and overhead, there's no discernable change in runtime + * over using system_wq. + */ + queue_work(system_unbound_wq, &ctx->exit_work); } static int io_uring_release(struct inode *inode, struct file *file) @@ -8016,6 +8024,28 @@ static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req) return false; } +static inline bool io_match_files(struct io_kiocb *req, + struct files_struct *files) +{ + return (req->flags & REQ_F_WORK_INITIALIZED) && req->work.files == files; +} + +static bool io_match_link_files(struct io_kiocb *req, + struct files_struct *files) +{ + struct io_kiocb *link; + + if (io_match_files(req, files)) + return true; + if (req->flags & REQ_F_LINK_HEAD) { + list_for_each_entry(link, &req->link_list, link_list) { + if (io_match_files(link, files)) + return true; + } + } + return false; +} + /* * We're looking to cancel 'req' because it's holding on to our files, but * 'req' could be a link to another request. 
See if it is, and cancel that @@ -8063,12 +8093,65 @@ static bool io_timeout_remove_link(struct io_ring_ctx *ctx, return found; } +static bool io_cancel_link_cb(struct io_wq_work *work, void *data) +{ + return io_match_link(container_of(work, struct io_kiocb, work), data); +} + +static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req) +{ + enum io_wq_cancel cret; + + /* cancel this particular work, if it's running */ + cret = io_wq_cancel_work(ctx->io_wq, &req->work); + if (cret != IO_WQ_CANCEL_NOTFOUND) + return; + + /* find links that hold this pending, cancel those */ + cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true); + if (cret != IO_WQ_CANCEL_NOTFOUND) + return; + + /* if we have a poll link holding this pending, cancel that */ + if (io_poll_remove_link(ctx, req)) + return; + + /* final option, timeout link is holding this req pending */ + io_timeout_remove_link(ctx, req); +} + +static void io_cancel_defer_files(struct io_ring_ctx *ctx, + struct files_struct *files) +{ + struct io_defer_entry *de = NULL; + LIST_HEAD(list); + + spin_lock_irq(&ctx->completion_lock); + list_for_each_entry_reverse(de, &ctx->defer_list, list) { + if (io_match_link_files(de->req, files)) { + list_cut_position(&list, &ctx->defer_list, &de->list); + break; + } + } + spin_unlock_irq(&ctx->completion_lock); + + while (!list_empty(&list)) { + de = list_first_entry(&list, struct io_defer_entry, list); + list_del_init(&de->list); + req_set_fail_links(de->req); + io_put_req(de->req); + io_req_complete(de->req, -ECANCELED); + kfree(de); + } +} + static void io_uring_cancel_files(struct io_ring_ctx *ctx, struct files_struct *files) { if (list_empty_careful(&ctx->inflight_list)) return; + io_cancel_defer_files(ctx, files); /* cancel all at once, should be faster than doing it one by one*/ io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true); @@ -8094,35 +8177,9 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx, /* We need to keep going until we don't find a matching req */ if (!cancel_req) break; - - if (cancel_req->flags & REQ_F_OVERFLOW) { - spin_lock_irq(&ctx->completion_lock); - list_del(&cancel_req->compl.list); - cancel_req->flags &= ~REQ_F_OVERFLOW; - - io_cqring_mark_overflow(ctx); - WRITE_ONCE(ctx->rings->cq_overflow, - atomic_inc_return(&ctx->cached_cq_overflow)); - io_commit_cqring(ctx); - spin_unlock_irq(&ctx->completion_lock); - - /* - * Put inflight ref and overflow ref. If that's - * all we had, then we're done with this request. - */ - if (refcount_sub_and_test(2, &cancel_req->refs)) { - io_free_req(cancel_req); - finish_wait(&ctx->inflight_wait, &wait); - continue; - } - } else { - io_wq_cancel_work(ctx->io_wq, &cancel_req->work); - /* could be a link, check and remove if it is */ - if (!io_poll_remove_link(ctx, cancel_req)) - io_timeout_remove_link(ctx, cancel_req); - io_put_req(cancel_req); - } - + /* cancel this request, or head link requests */ + io_attempt_cancel(ctx, cancel_req); + io_put_req(cancel_req); schedule(); finish_wait(&ctx->inflight_wait, &wait); } @@ -8548,6 +8605,9 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, ctx->user = user; ctx->creds = get_current_cred(); + mmgrab(current->mm); + ctx->sqo_mm = current->mm; + /* * Account memory _before_ installing the file descriptor. Once * the descriptor is installed, it can get closed at any time. 
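
io_cancel_defer_files() above uses list_cut_position() to move the front of ctx->defer_list, up to and including the chosen entry, onto a private list while the completion lock is held; the cut-off requests are then failed and completed with the lock dropped. A minimal standalone model of the splice itself (the real kernel helper also handles the empty-list and cut-at-head edge cases, which this toy does not):

#include <stdio.h>

struct node { struct node *prev, *next; int id; };

static void list_init(struct node *h) { h->prev = h->next = h; }

static void list_add_tail(struct node *h, struct node *n)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

/* Move head..entry (inclusive) from 'head' onto the empty list 'out'. */
static void cut_position(struct node *out, struct node *head,
			 struct node *entry)
{
	struct node *first = head->next;

	head->next = entry->next;	/* detach the prefix from 'head' */
	entry->next->prev = head;
	out->next = first;		/* splice it onto 'out' */
	first->prev = out;
	out->prev = entry;
	entry->next = out;
}

int main(void)
{
	struct node head, out, n[4];
	list_init(&head); list_init(&out);
	for (int i = 0; i < 4; i++) { n[i].id = i; list_add_tail(&head, &n[i]); }
	cut_position(&out, &head, &n[1]);	/* out = {0,1}, head = {2,3} */
	for (struct node *p = out.next; p != &out; p = p->next)
		printf("cut: %d\n", p->id);
	return 0;
}

Draining onto a private list first is the standard pattern for completing work that cannot be processed while the lock protecting the queue is held.
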
Also diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c index 89f61d93c0bcfddad22c6dda6385bdaa4e8da893..107ee80c356833936487d200fae5825eac90d976 100644 --- a/fs/iomap/seek.c +++ b/fs/iomap/seek.c @@ -127,7 +127,7 @@ iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length, SEEK_HOLE); if (offset < 0) return length; - /* fall through */ + fallthrough; case IOMAP_HOLE: *(loff_t *)data = offset; return 0; @@ -175,7 +175,7 @@ iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length, SEEK_DATA); if (offset < 0) return length; - /*FALLTHRU*/ + fallthrough; default: *(loff_t *)data = offset; return 0; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index e4944436e733d01611504502455d24147fa1f2ce..17fdc482f554affb72940291ab33173d482c4c1a 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1285,7 +1285,7 @@ journal_t *jbd2_journal_init_inode(struct inode *inode) * superblock as being NULL to prevent the journal destroy from writing * back a bogus superblock. */ -static void journal_fail_superblock (journal_t *journal) +static void journal_fail_superblock(journal_t *journal) { struct buffer_head *bh = journal->j_sb_buffer; brelse(bh); @@ -1367,8 +1367,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags) int ret; /* Buffer got discarded which means block device got invalidated */ - if (!buffer_mapped(bh)) + if (!buffer_mapped(bh)) { + unlock_buffer(bh); return -EIO; + } trace_jbd2_write_superblock(journal, write_flags); if (!(journal->j_flags & JBD2_BARRIER)) @@ -1815,7 +1817,7 @@ int jbd2_journal_destroy(journal_t *journal) /** - *int jbd2_journal_check_used_features () - Check if features specified are used. + *int jbd2_journal_check_used_features() - Check if features specified are used. * @journal: Journal to check. * @compat: bitmask of compatible features * @ro: bitmask of features that force read-only mount @@ -1825,7 +1827,7 @@ int jbd2_journal_destroy(journal_t *journal) * features. Return true (non-zero) if it does. **/ -int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat, +int jbd2_journal_check_used_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { journal_superblock_t *sb; @@ -1860,7 +1862,7 @@ int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat, * all of a given set of features on this journal. Return true * (non-zero) if it can. */ -int jbd2_journal_check_available_features (journal_t *journal, unsigned long compat, +int jbd2_journal_check_available_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { if (!compat && !ro && !incompat) @@ -1882,7 +1884,7 @@ int jbd2_journal_check_available_features (journal_t *journal, unsigned long com } /** - * int jbd2_journal_set_features () - Mark a given journal feature in the superblock + * int jbd2_journal_set_features() - Mark a given journal feature in the superblock * @journal: Journal to act on. 
* @compat: bitmask of compatible features * @ro: bitmask of features that force read-only mount @@ -1893,7 +1895,7 @@ int jbd2_journal_check_available_features (journal_t *journal, unsigned long com * */ -int jbd2_journal_set_features (journal_t *journal, unsigned long compat, +int jbd2_journal_set_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { #define INCOMPAT_FEATURE_ON(f) \ diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 2ed278f0dcede6888110ca98930cb06d160f6957..faa97d748474d5470a541e1760f6e83345a4671f 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -690,14 +690,11 @@ static int do_one_pass(journal_t *journal, * number. */ if (pass == PASS_SCAN && jbd2_has_feature_checksum(journal)) { - int chksum_err, chksum_seen; struct commit_header *cbh = (struct commit_header *)bh->b_data; unsigned found_chksum = be32_to_cpu(cbh->h_chksum[0]); - chksum_err = chksum_seen = 0; - if (info->end_transaction) { journal->j_failed_commit = info->end_transaction; @@ -705,42 +702,23 @@ static int do_one_pass(journal_t *journal, break; } - if (crc32_sum == found_chksum && - cbh->h_chksum_type == JBD2_CRC32_CHKSUM && - cbh->h_chksum_size == - JBD2_CRC32_CHKSUM_SIZE) - chksum_seen = 1; - else if (!(cbh->h_chksum_type == 0 && - cbh->h_chksum_size == 0 && - found_chksum == 0 && - !chksum_seen)) - /* - * If fs is mounted using an old kernel and then - * kernel with journal_chksum is used then we - * get a situation where the journal flag has - * checksum flag set but checksums are not - * present i.e chksum = 0, in the individual - * commit blocks. - * Hence to avoid checksum failures, in this - * situation, this extra check is added. - */ - chksum_err = 1; - - if (chksum_err) { - info->end_transaction = next_commit_ID; - - if (!jbd2_has_feature_async_commit(journal)) { - journal->j_failed_commit = - next_commit_ID; - brelse(bh); - break; - } - } + /* Neither checksum match nor unused? */ + if (!((crc32_sum == found_chksum && + cbh->h_chksum_type == + JBD2_CRC32_CHKSUM && + cbh->h_chksum_size == + JBD2_CRC32_CHKSUM_SIZE) || + (cbh->h_chksum_type == 0 && + cbh->h_chksum_size == 0 && + found_chksum == 0))) + goto chksum_error; + crc32_sum = ~0; } if (pass == PASS_SCAN && !jbd2_commit_block_csum_verify(journal, bh->b_data)) { + chksum_error: info->end_transaction = next_commit_ID; if (!jbd2_has_feature_async_commit(journal)) { diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index e91aad3637a23fb63d4421d9a441c1ac7ef5dc04..43985738aa860d91d1ecda400783cc99eae6ed2b 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -2026,6 +2026,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) */ static void __jbd2_journal_unfile_buffer(struct journal_head *jh) { + J_ASSERT_JH(jh, jh->b_transaction != NULL); + J_ASSERT_JH(jh, jh->b_next_transaction == NULL); + __jbd2_journal_temp_unlink_buffer(jh); jh->b_transaction = NULL; } @@ -2078,10 +2081,6 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) * int jbd2_journal_try_to_free_buffers() - try to free page buffers. * @journal: journal for operation * @page: to try and free - * @gfp_mask: we use the mask to detect how hard should we try to release - * buffers. If __GFP_DIRECT_RECLAIM and __GFP_FS is set, we wait for commit - * code to release the buffers. 
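
The fs/jbd2/recovery.c rewrite above folds the old chksum_seen/chksum_err flag juggling into a single predicate: a commit block is acceptable when the computed checksum matches, or when the checksum fields are wholly unused (all zero, as written by kernels that predate journal checksumming). Restated as a standalone function; the two constants mirror JBD2_CRC32_CHKSUM and JBD2_CRC32_CHKSUM_SIZE as I read the jbd2 headers, so treat them as assumptions:

#define CRC32_CHKSUM	  1	/* JBD2_CRC32_CHKSUM */
#define CRC32_CHKSUM_SIZE 4	/* JBD2_CRC32_CHKSUM_SIZE */

static int commit_block_ok(unsigned int computed, unsigned int found,
			   unsigned char type, unsigned char size)
{
	int matches = computed == found &&
		      type == CRC32_CHKSUM && size == CRC32_CHKSUM_SIZE;
	int unused  = type == 0 && size == 0 && found == 0;

	return matches || unused;
}
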
- * * For all the buffers on this page, * if they are fully written out ordered data, move them onto BUF_CLEAN @@ -2112,11 +2111,11 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) * * Return 0 on failure, 1 on success */ -int jbd2_journal_try_to_free_buffers(journal_t *journal, - struct page *page, gfp_t gfp_mask) +int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page) { struct buffer_head *head; struct buffer_head *bh; + bool has_write_io_error = false; int ret = 0; J_ASSERT(PageLocked(page)); @@ -2141,11 +2140,26 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, jbd2_journal_put_journal_head(jh); if (buffer_jbd(bh)) goto busy; + + /* + * If we free a metadata buffer that has failed to be + * written out, the jbd2 checkpoint procedure will not detect + * this failure, which may lead to filesystem inconsistency + * after the journal tail is cleaned up. + */ + if (buffer_write_io_error(bh)) { + pr_err("JBD2: Error during async writeback of metadata bh %llu.", + (unsigned long long)bh->b_blocknr); + has_write_io_error = true; + } } while ((bh = bh->b_this_page) != head); ret = try_to_free_buffers(page); busy: + if (has_write_io_error) + jbd2_journal_abort(journal, -EIO); + return ret; } @@ -2572,6 +2586,13 @@ bool __jbd2_journal_refile_buffer(struct journal_head *jh) was_dirty = test_clear_buffer_jbddirty(bh); __jbd2_journal_temp_unlink_buffer(jh); + + /* + * b_transaction must be set, otherwise the new b_transaction won't + * hold the jh reference + */ + J_ASSERT_JH(jh, jh->b_transaction != NULL); + /* * We set b_transaction here because b_next_transaction will inherit * our jh reference and thus __jbd2_journal_file_buffer() must not diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index ab8cdd9e932590ea484f2e45b927e26f244ad2f7..78858f6e95839cb03859fd36d67dfd85d7c6ba74 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -341,7 +341,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) rdev = old_decode_dev(je16_to_cpu(jdev.old_id)); else rdev = new_decode_dev(je32_to_cpu(jdev.new_id)); - /* fall through */ + fallthrough; case S_IFSOCK: case S_IFIFO: diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index bccfc40b3a74ab002e45a07149afbe09634d8f64..2f6f0b140c05aaa0217e85da7c4211b9aa5d2321 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -1273,7 +1273,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, dbg_readinode("symlink's target '%s' cached\n", f->target); } - /* fall through...
*/ + fallthrough; case S_IFBLK: case S_IFCHR: diff --git a/fs/libfs.c b/fs/libfs.c index 4d08edf19c78252ebe3af999bf60e19823d3b8f2..e0d42e977d9afd5aafcd0fb85cce4c9752ef895f 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -137,11 +137,11 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) switch (whence) { case 1: offset += file->f_pos; - /* fall through */ + fallthrough; case 0: if (offset >= 0) break; - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/fs/locks.c b/fs/locks.c index 8fc0542f513264c6fd1eb96581ab888878e99f8b..1f84a03601feca5ef577682572be81808b97e53f 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1499,7 +1499,7 @@ static void lease_clear_pending(struct file_lock *fl, int arg) switch (arg) { case F_UNLCK: fl->fl_flags &= ~FL_UNLOCK_PENDING; - /* fall through */ + fallthrough; case F_RDLCK: fl->fl_flags &= ~FL_DOWNGRADE_PENDING; } @@ -2525,7 +2525,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, cmd = F_SETLKW; file_lock->fl_flags |= FL_OFDLCK; file_lock->fl_owner = filp; - /* Fallthrough */ + fallthrough; case F_SETLKW: file_lock->fl_flags |= FL_SLEEP; } @@ -2656,7 +2656,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, cmd = F_SETLKW64; file_lock->fl_flags |= FL_OFDLCK; file_lock->fl_owner = filp; - /* Fallthrough */ + fallthrough; case F_SETLKW64: file_lock->fl_flags |= FL_SLEEP; } diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index d1a0e2c8b1b486bd2866b388fce595f036abe58b..08108b6d2fa107e3bbc0cc86668fea662db4f2eb 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -753,7 +753,7 @@ bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr, case -ENODEV: /* Our extent block devices are unavailable */ set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags); - /* Fall through */ + fallthrough; case 0: return lseg; default: diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index a12f42e7d8c7c417caeb23b88dcd4542d5424f50..e732580fe47b321d2c852e80e5e11100ebe34948 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1181,7 +1181,7 @@ int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags) /* A NFSv4 OPEN will revalidate later */ if (server->caps & NFS_CAP_ATOMIC_OPEN) goto out; - /* Fallthrough */ + fallthrough; case S_IFDIR: if (server->flags & NFS_MOUNT_NOCTO) break; diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index a13e69009f19610c7afe1136cd2d79bb3c475146..7f5aa0403e16743c2266c6308cacb9fec20711ea 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -187,7 +187,7 @@ static int filelayout_async_handle_error(struct rpc_task *task, pnfs_error_mark_layout_for_return(inode, lseg); pnfs_set_lo_fail(lseg); rpc_wake_up(&tbl->slot_tbl_waitq); - /* fall through */ + fallthrough; default: reset: dprintk("%s Retry through MDS. 
Error %d\n", __func__, diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 965145592750afe3312e9569c666233e386dbda4..ff8965d1a4d47ee7436e642a12ff199137cfa212 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1133,7 +1133,7 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid); rpc_wake_up(&tbl->slot_tbl_waitq); - /* fall through */ + fallthrough; default: if (ff_layout_avoid_mds_available_ds(lseg)) return -NFS4ERR_RESET_TO_PNFS; @@ -1260,7 +1260,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, */ if (opnum == OP_READ) break; - /* Fallthrough */ + fallthrough; default: pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg); diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c index 66949da0e82779891350964bc45e87872f9a8b78..524812984e2d43d78e275f5c6b08190797e0c3a5 100644 --- a/fs/nfs/fs_context.c +++ b/fs/nfs/fs_context.c @@ -651,21 +651,21 @@ static int nfs_fs_context_parse_param(struct fs_context *fc, switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) { case Opt_xprt_udp6: protofamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_udp: ctx->flags &= ~NFS_MOUNT_TCP; ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP; break; case Opt_xprt_tcp6: protofamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_tcp: ctx->flags |= NFS_MOUNT_TCP; ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP; break; case Opt_xprt_rdma6: protofamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_rdma: /* vector side protocols to TCP */ ctx->flags |= NFS_MOUNT_TCP; @@ -684,13 +684,13 @@ static int nfs_fs_context_parse_param(struct fs_context *fc, switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) { case Opt_xprt_udp6: mountfamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_udp: ctx->mount_server.protocol = XPRT_TRANSPORT_UDP; break; case Opt_xprt_tcp6: mountfamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_tcp: ctx->mount_server.protocol = XPRT_TRANSPORT_TCP; break; @@ -899,9 +899,11 @@ static int nfs23_parse_monolithic(struct fs_context *fc, ctx->version = NFS_DEFAULT_VERSION; switch (data->version) { case 1: - data->namlen = 0; /* fall through */ + data->namlen = 0; + fallthrough; case 2: - data->bsize = 0; /* fall through */ + data->bsize = 0; + fallthrough; case 3: if (data->flags & NFS_MOUNT_VER3) goto out_no_v3; @@ -909,14 +911,14 @@ static int nfs23_parse_monolithic(struct fs_context *fc, memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE); /* Turn off security negotiation */ extra_flags |= NFS_MOUNT_SECFLAVOUR; - /* fall through */ + fallthrough; case 4: if (data->flags & NFS_MOUNT_SECFLAVOUR) goto out_no_sec; - /* fall through */ + fallthrough; case 5: memset(data->context, 0, sizeof(data->context)); - /* fall through */ + fallthrough; case 6: if (data->flags & NFS_MOUNT_VER3) { if (data->root.size > NFS3_FHSIZE || data->root.size == 0) diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 26c94b32d6f49e15f2c4e4a50d219f6a8592e034..c6c863382f375b5425b7f9949bcda47314a9f754 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -108,7 +108,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type) case -EPROTONOSUPPORT: dprintk("NFS_V3_ACL extension not supported; disabling\n"); server->caps &= ~NFS_CAP_ACLS; - /* fall through */ + fallthrough; case -ENOTSUPP: 
status = -EOPNOTSUPP; default: @@ -228,7 +228,7 @@ static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, dprintk("NFS_V3_ACL SETACL RPC not supported" "(will not retry)\n"); server->caps &= ~NFS_CAP_ACLS; - /* fall through */ + fallthrough; case -ENOTSUPP: status = -EOPNOTSUPP; } diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index a339707654673bd17fb71ec9105b5f50a1c52c15..fdfc77486acee6b34ed9b54c16e7f8e27c2e238d 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -211,7 +211,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) ret = nfs42_proc_llseek(filep, offset, whence); if (ret != -ENOTSUPP) return ret; - /* Fall through */ + fallthrough; default: return nfs_file_llseek(filep, offset, whence); } diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 1e7296395d71bfc3144d5a57c6658712d7730c09..62e6eea5c516d8c1a19fd96ea4d749b439875da9 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -520,7 +520,7 @@ static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap, switch (token) { case Opt_find_uid: im->im_type = IDMAP_TYPE_USER; - /* Fall through */ + fallthrough; case Opt_find_gid: im->im_conv = IDMAP_CONV_NAMETOID; ret = match_strlcpy(im->im_name, &substr, IDMAP_NAMESZ); @@ -528,7 +528,7 @@ static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap, case Opt_find_user: im->im_type = IDMAP_TYPE_USER; - /* Fall through */ + fallthrough; case Opt_find_group: im->im_conv = IDMAP_CONV_IDTONAME; ret = match_int(&substr, &im->im_id); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index dbd01548335b2d034d90df3c4ea56ce4ae4dda64..6e95c85fe395a88b0c93f75a17bfc8c76bcbbbde 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -483,7 +483,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server, stateid); goto wait_on_recovery; } - /* Fall through */ + fallthrough; case -NFS4ERR_OPENMODE: if (inode) { int err; @@ -534,10 +534,10 @@ static int nfs4_do_handle_exception(struct nfs_server *server, ret = -EBUSY; break; } - /* Fall through */ + fallthrough; case -NFS4ERR_DELAY: nfs_inc_server_stats(server, NFSIOS_DELAY); - /* Fall through */ + fallthrough; case -NFS4ERR_GRACE: case -NFS4ERR_LAYOUTTRYLATER: case -NFS4ERR_RECALLCONFLICT: @@ -1505,7 +1505,7 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode, case NFS4_OPEN_CLAIM_PREVIOUS: if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) break; - /* Fall through */ + fallthrough; default: return 0; } @@ -2439,7 +2439,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) case NFS4_OPEN_CLAIM_DELEG_CUR_FH: case NFS4_OPEN_CLAIM_DELEG_PREV_FH: data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; - /* Fall through */ + fallthrough; case NFS4_OPEN_CLAIM_FH: task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; } @@ -3293,8 +3293,10 @@ static int _nfs4_do_setattr(struct inode *inode, /* Servers should only apply open mode checks for file size changes */ truncate = (arg->iap->ia_valid & ATTR_SIZE) ? 
true : false; - if (!truncate) + if (!truncate) { + nfs4_inode_make_writeable(inode); goto zero_stateid; + } if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { /* Use that stateid */ @@ -3545,11 +3547,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data) nfs4_free_revoked_stateid(server, &calldata->arg.stateid, task->tk_msg.rpc_cred); - /* Fallthrough */ + fallthrough; case -NFS4ERR_BAD_STATEID: if (calldata->arg.fmode == 0) break; - /* Fallthrough */ + fallthrough; default: task->tk_status = nfs4_async_handle_exception(task, server, task->tk_status, &exception); @@ -6294,7 +6296,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) nfs4_free_revoked_stateid(data->res.server, data->args.stateid, task->tk_msg.rpc_cred); - /* Fallthrough */ + fallthrough; case -NFS4ERR_BAD_STATEID: case -NFS4ERR_STALE_STATEID: case -ETIMEDOUT: @@ -6314,7 +6316,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) data->res.fattr = NULL; goto out_restart; } - /* Fallthrough */ + fallthrough; default: task->tk_status = nfs4_async_handle_exception(task, data->res.server, task->tk_status, @@ -6622,13 +6624,13 @@ static void nfs4_locku_done(struct rpc_task *task, void *data) if (nfs4_update_lock_stateid(calldata->lsp, &calldata->res.stateid)) break; - /* Fall through */ + fallthrough; case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_EXPIRED: nfs4_free_revoked_stateid(calldata->server, &calldata->arg.stateid, task->tk_msg.rpc_cred); - /* Fall through */ + fallthrough; case -NFS4ERR_BAD_STATEID: case -NFS4ERR_STALE_STATEID: if (nfs4_sync_lock_stateid(&calldata->arg.stateid, @@ -7298,7 +7300,12 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, err = nfs4_set_lock_state(state, fl); if (err != 0) return err; - err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); + do { + err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); + if (err != -NFS4ERR_DELAY) + break; + ssleep(1); + } while (err == -NFS4ERR_DELAY); return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); } @@ -8665,7 +8672,7 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); rpc_delay(task, NFS4_POLL_RETRY_MIN); task->tk_status = 0; - /* fall through */ + fallthrough; case -NFS4ERR_RETRY_UNCACHED_REP: rpc_restart_call_prepare(task); return; @@ -9113,13 +9120,13 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf switch(task->tk_status) { case 0: wake_up_all(&clp->cl_lock_waitq); - /* Fallthrough */ + fallthrough; case -NFS4ERR_COMPLETE_ALREADY: case -NFS4ERR_WRONG_CRED: /* What to do here? 
*/ break; case -NFS4ERR_DELAY: rpc_delay(task, NFS4_POLL_RETRY_MAX); - /* fall through */ + fallthrough; case -NFS4ERR_RETRY_UNCACHED_REP: return -EAGAIN; case -NFS4ERR_BADSESSION: @@ -9434,10 +9441,10 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) &lrp->args.range, lrp->args.inode)) goto out_restart; - /* Fallthrough */ + fallthrough; default: task->tk_status = 0; - /* Fallthrough */ + fallthrough; case 0: break; case -NFS4ERR_DELAY: diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index b1dba24918f806139108645d424eb48e7af1180c..4bf10792cb5b1049790ec0d0584f263fb3bac96c 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1530,7 +1530,7 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_ default: pr_err("NFS: %s: unhandled error %d\n", __func__, status); - /* Fall through */ + fallthrough; case -ENOMEM: case -NFS4ERR_DENIED: case -NFS4ERR_RECLAIM_BAD: @@ -1667,7 +1667,7 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs break; } printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status); - /* Fall through */ + fallthrough; case -ENOENT: case -ENOMEM: case -EACCES: @@ -1683,7 +1683,7 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs set_bit(ops->state_flag_bit, &state->flags); break; } - /* Fall through */ + fallthrough; case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_OLD_STATEID: @@ -1695,7 +1695,7 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs case -NFS4ERR_EXPIRED: case -NFS4ERR_NO_GRACE: nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state); - /* Fall through */ + fallthrough; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: @@ -2273,11 +2273,11 @@ int nfs4_discover_server_trunking(struct nfs_client *clp, case -ETIMEDOUT: if (clnt->cl_softrtry) break; - /* Fall through */ + fallthrough; case -NFS4ERR_DELAY: case -EAGAIN: ssleep(1); - /* Fall through */ + fallthrough; case -NFS4ERR_STALE_CLIENTID: dprintk("NFS: %s after status %d, retrying\n", __func__, status); @@ -2289,7 +2289,7 @@ int nfs4_discover_server_trunking(struct nfs_client *clp, } if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX) break; - /* Fall through */ + fallthrough; case -NFS4ERR_CLID_INUSE: case -NFS4ERR_WRONGSEC: /* No point in retrying if we already used RPC_AUTH_UNIX */ diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 6ea4cac41e46fe3ea30d1c0e8888ec5bb45e8390..6985cacf4700df1759239ddf0e73d44f59806f06 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -711,7 +711,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, case FLUSH_COND_STABLE: if (nfs_reqs_to_commit(cinfo)) break; - /* fall through */ + fallthrough; default: hdr->args.stable = NFS_FILE_SYNC; } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 40332c758d8466392fdf264d9c0ae8954ec2941e..71f7741126b6dcc5f247d8fd799e9cd488ac382e 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1541,7 +1541,7 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args, case 0: if (res->lrs_present) res_stateid = &res->stateid; - /* Fallthrough */ + fallthrough; default: arg_stateid = &args->stateid; } diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c index 8ceb6425e01aa2f67f5442e83429e6874ed80106..d056ad2fdefd6eb387217712a3c792ad24ebac2b 100644 --- a/fs/nfs_common/nfsacl.c +++ b/fs/nfs_common/nfsacl.c @@ -237,7 +237,7 @@ posix_acl_from_nfsacl(struct posix_acl *acl) break; case ACL_MASK: mask = 
pa; - /* fall through */ + fallthrough; case ACL_OTHER: break; } diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index 9bbaa671c0799d70d8bad77b47dd7e9eb83fdf4c..311e5ce80cfc27bf36c29075927408adf04d772d 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -83,13 +83,13 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, bex->soff = iomap.addr; break; } - /*FALLTHRU*/ + fallthrough; case IOMAP_HOLE: if (seg->iomode == IOMODE_READ) { bex->es = PNFS_BLOCK_NONE_DATA; break; } - /*FALLTHRU*/ + fallthrough; case IOMAP_DELALLOC: default: WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type); diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 7fbe9840a03e70e5fb536527cbe94e4f3085ddb4..052be5bf9ef50ca21c920f457aa29ed1e1cd6ff9 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -1119,7 +1119,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback break; case -ESERVERFAULT: ++session->se_cb_seq_nr; - /* Fall through */ + fallthrough; case 1: case -NFS4ERR_BADSESSION: nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status); diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index e12409eca7cc42facf54b4af0d8a636e6bc7fb0e..a97873f2d22b0def8c96893fc5b8dc697e8d783f 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -681,7 +681,7 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task) rpc_delay(task, HZ/100); /* 10 mili-seconds */ return 0; } - /* Fallthrough */ + fallthrough; default: /* * Unknown error or non-responding client, we'll need to fence. diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index a527da3d8052191e81f5424d3fda9e1ddebdc870..eaf50eafa93596f791b2461cd8de5cbdca8436f5 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -428,7 +428,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, goto out; open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; reclaim = true; - /* fall through */ + fallthrough; case NFS4_OPEN_CLAIM_FH: case NFS4_OPEN_CLAIM_DELEG_CUR_FH: status = do_open_fhandle(rqstp, cstate, open); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 81ed8e8bab3fc786561580f2b752af2919e9208e..c09a2a4281ec93a28ccec489d2b22a788f319467 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3117,7 +3117,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, break; default: /* checked by xdr code */ WARN_ON_ONCE(1); - /* fall through */ + fallthrough; case SP4_SSV: status = nfserr_encr_alg_unsupp; goto out_nolock; @@ -4532,7 +4532,7 @@ static int nfsd4_cb_recall_done(struct nfsd4_callback *cb, rpc_delay(task, 2 * HZ); return 0; } - /*FALLTHRU*/ + fallthrough; default: return 1; } @@ -4597,6 +4597,8 @@ static bool nfsd_breaker_owns_lease(struct file_lock *fl) if (!i_am_nfsd()) return NULL; rqst = kthread_data(current); + if (!rqst->rq_lease_breaker) + return NULL; clp = *(rqst->rq_lease_breaker); return dl->dl_stid.sc_client == clp; } @@ -5652,7 +5654,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) break; default: printk("unknown stateid type %x\n", s->sc_type); - /* Fallthrough */ + fallthrough; case NFS4_CLOSED_STID: case NFS4_CLOSED_DELEG_STID: status = nfserr_bad_stateid; @@ -6742,7 +6744,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, case NFS4_READW_LT: if (nfsd4_has_session(cstate)) fl_flags |= FL_SLEEP; - /* Fallthrough */ + fallthrough; case NFS4_READ_LT: spin_lock(&fp->fi_lock); nf = 
find_readable_file_locked(fp); @@ -6754,7 +6756,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, case NFS4_WRITEW_LT: if (nfsd4_has_session(cstate)) fl_flags |= FL_SLEEP; - /* Fallthrough */ + fallthrough; case NFS4_WRITE_LT: spin_lock(&fp->fi_lock); nf = find_writeable_file_locked(fp); @@ -6816,7 +6818,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, break; case FILE_LOCK_DEFERRED: nbl = NULL; - /* Fallthrough */ + fallthrough; case -EAGAIN: /* conflock holds conflicting lock */ status = nfserr_denied; dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 37bc8f5f4514286faed2941df3788e65dfc640e9..c81dbbad8792011847fd7472d21c9498592b379c 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -459,7 +459,7 @@ static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp) case FSID_DEV: if (!old_valid_dev(exp_sb(exp)->s_dev)) return false; - /* FALL THROUGH */ + fallthrough; case FSID_MAJOR_MINOR: case FSID_ENCODE_DEV: return exp_sb(exp)->s_type->fs_flags & FS_REQUIRES_DEV; @@ -469,7 +469,7 @@ static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp) case FSID_UUID16: if (!is_root_export(exp)) return false; - /* fall through */ + fallthrough; case FSID_UUID4_INUM: case FSID_UUID16_INUM: return exp->ex_uuid != NULL; diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 543bbe0a556e7387c19e7aa40fd6400698766f86..6e0b066480c50428370ffd632d63c06b42208d12 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -314,7 +314,7 @@ nfsd_proc_create(struct svc_rqst *rqstp) rdev = inode->i_rdev; attr->ia_valid |= ATTR_SIZE; - /* FALLTHROUGH */ + fallthrough; case S_IFIFO: /* this is probably a permission check.. * at least IRIX implements perm checking on diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index b603dfcdd3615fb6a37e2021e0b07ef14e17f256..f7f6473578afaf6478b0dde4a001f8194b6ba82a 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -221,7 +221,7 @@ int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change) case NFSD_TEST: if (nn->nfsd_versions) return nn->nfsd_versions[vers]; - /* Fallthrough */ + fallthrough; case NFSD_AVAIL: return nfsd_support_version(vers); } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 7d2933b85b6536c0c26a0e9b2beb6e646757dce6..aba5af9df328a411df5d2c9bbb1843dcf1f98ad4 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1456,7 +1456,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, *created = true; break; } - /* fall through */ + fallthrough; case NFS4_CREATE_EXCLUSIVE4_1: if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime && d_inode(dchild)->i_atime.tv_sec == v_atime @@ -1465,7 +1465,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, *created = true; goto set_attr; } - /* fall through */ + fallthrough; case NFS3_CREATE_GUARDED: err = nfserr_exist; } diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index fb5a9a8a13cf7d734c0c13f38df097786c5ba5a1..e516ae389ca5bede8bfcf9d3fdc669464a84dd8c 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -519,7 +519,7 @@ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) break; case NILFS_IFILE_INO: lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); - /* Fall through */ + fallthrough; default: bmap->b_ptr_type = NILFS_BMAP_PTR_VM; bmap->b_last_allocated_key = 0; diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 0b453ef8fae5cf55018d4e20580f0e402449a10a..2217f904a7cfb5230b2e66b11660a0dbb1d20d24 100644 --- 
a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -626,7 +626,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, !(flags & NILFS_SS_SYNDT)) goto try_next_pseg; state = RF_DSYNC_ST; - /* Fall through */ + fallthrough; case RF_DSYNC_ST: if (!(flags & NILFS_SS_SYNDT)) goto confused; diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index a651e821c2de3e395da124a35099fc1fedd93886..e3726aca28ed63d5441778a4c9bc5b24050d5a18 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1138,7 +1138,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) nilfs_sc_cstage_set(sci, NILFS_ST_DAT); goto dat_stage; } - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_GC: if (nilfs_doing_gc()) { head = &sci->sc_gc_inodes; @@ -1159,7 +1160,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) } sci->sc_stage.gc_inode_ptr = NULL; } - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_FILE: head = &sci->sc_dirty_files; ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head, @@ -1186,7 +1188,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) } nilfs_sc_cstage_inc(sci); sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED; - /* Fall through */ + fallthrough; case NILFS_ST_IFILE: err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile, &nilfs_sc_file_ops); @@ -1197,13 +1199,14 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) err = nilfs_segctor_create_checkpoint(sci); if (unlikely(err)) break; - /* Fall through */ + fallthrough; case NILFS_ST_CPFILE: err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile, &nilfs_sc_file_ops); if (unlikely(err)) break; - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_SUFILE: err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs, sci->sc_nfreesegs, &ndone); @@ -1219,7 +1222,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) &nilfs_sc_file_ops); if (unlikely(err)) break; - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_DAT: dat_stage: err = nilfs_segctor_scan_file(sci, nilfs->ns_dat, @@ -1230,7 +1234,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) nilfs_sc_cstage_set(sci, NILFS_ST_DONE); return 0; } - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_SR: if (mode == SC_LSEG_SR) { /* Appending a super root */ diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 559de311deca96e29cb07853e050c4ceefb6dcab..3e01d8f2ab9061faa43f865e1f9f3dc2868e5dbc 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -1147,7 +1147,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, } switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { - case FAN_MARK_ADD: /* fallthrough */ + case FAN_MARK_ADD: case FAN_MARK_REMOVE: if (!mask) return -EINVAL; diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index 1ef24574f48189ff573ab3662f68bd6f4de79bf0..cea739be77c466be3989c57268860c11cce285fa 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c @@ -67,7 +67,7 @@ static void o2quo_fence_self(void) default: WARN_ON(o2nm_single_cluster->cl_fence_method >= O2NM_FENCE_METHODS); - /* fall through */ + 
fallthrough; case O2NM_FENCE_RESET: printk(KERN_ERR "*** ocfs2 is very sorry to be fencing this " "system by restarting ***\n"); diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c index 819428dfa32facda1d27f248fbcde7d4635ca5c9..3ce89216670c9b112fb568bd271a08ad44e35958 100644 --- a/fs/pstore/zone.c +++ b/fs/pstore/zone.c @@ -1081,7 +1081,6 @@ static ssize_t psz_pstore_read(struct pstore_record *record) readop = psz_ftrace_read; break; case PSTORE_TYPE_CONSOLE: - fallthrough; case PSTORE_TYPE_PMSG: readop = psz_record_read; break; diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 5444d3c4d93f37e52827fb895db162848ea6c8cb..47f9e151988b3eda074b4e2eef05273392091a38 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -38,7 +38,7 @@ static int check_quotactl_permission(struct super_block *sb, int type, int cmd, if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) || (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id)))) break; - /*FALLTHROUGH*/ + fallthrough; default: if (!capable(CAP_SYS_ADMIN)) return -EPERM; diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c index 6b2b4362089e64a5afd7b5da3d267d71a4ba96bd..b57b3ffcbc3273b9972dfa29bc7966141f0c79e8 100644 --- a/fs/romfs/storage.c +++ b/fs/romfs/storage.c @@ -217,10 +217,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos, size_t limit; limit = romfs_maxsize(sb); - if (pos >= limit) + if (pos >= limit || buflen > limit - pos) return -EIO; - if (buflen > limit - pos) - buflen = limit - pos; #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) diff --git a/fs/seq_file.c b/fs/seq_file.c index 4e6239f33c066ae45860d685860b90492e82cb15..31219c1db17de3589770a683cfedd8ffd0c5095e 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -295,7 +295,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence) switch (whence) { case SEEK_CUR: offset += file->f_pos; - /* fall through */ + fallthrough; case SEEK_SET: if (offset < 0) break; diff --git a/fs/signalfd.c b/fs/signalfd.c index 5b78719be445591b555fd5e57bb6f9df892e1706..456046e158737450055c77283321867f03b23ee0 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -176,7 +176,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info if (!nonblock) break; ret = -EAGAIN; - /* fall through */ + fallthrough; default: spin_unlock_irq(¤t->sighand->siglock); return ret; diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 76bb1c846845e33737d428dcc26e4840fd4aeebe..8a19773b5a0b7f14b86f16e97643e7b4c76f941b 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -87,7 +87,11 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length, int error, i; struct bio *bio; - bio = bio_alloc(GFP_NOIO, page_count); + if (page_count <= BIO_MAX_PAGES) + bio = bio_alloc(GFP_NOIO, page_count); + else + bio = bio_kmalloc(GFP_NOIO, page_count); + if (!bio) return -ENOMEM; diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 22bfda158f7fdbcfb1b327736dd66a2fba2a3898..6d6cd85c2b4cab69e98116c5809352416c4a8577 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -269,7 +269,7 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, break; /* No more room on heap so make it un-categorized */ cat = LPROPS_UNCAT; - /* Fall through */ + fallthrough; case LPROPS_UNCAT: list_add(&lprops->list, &c->uncat_list); break; @@ -313,7 +313,7 @@ static void ubifs_remove_from_cat(struct ubifs_info *c, case LPROPS_FREEABLE: c->freeable_cnt -= 1; ubifs_assert(c, c->freeable_cnt >= 0); - /* Fall through */ + 
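
The fs/romfs/storage.c change above turns the old truncate-to-limit behaviour into an outright rejection, and phrases the bound so the arithmetic cannot wrap: 'buflen > limit - pos' is evaluated only after 'pos >= limit' has been ruled out, whereas the naive 'pos + buflen > limit' can overflow for a huge buflen. The same check, standalone:

#include <stdint.h>

/* Returns 0 when the read window [pos, pos + buflen) fits below limit. */
static int check_window(uint64_t pos, uint64_t buflen, uint64_t limit)
{
	if (pos >= limit || buflen > limit - pos)
		return -5;	/* -EIO in the kernel */
	return 0;
}

Rejecting the oversized read, rather than shortening it, means a corrupted image can no longer cause a caller to operate on a partially filled buffer it believes is full.
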
fallthrough; case LPROPS_UNCAT: case LPROPS_EMPTY: case LPROPS_FRDI_IDX: diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index 6023c97c6da2f269894bdd3966a5a415c6345955..25ff91c7e94afae4b36ca731f16ff0f83762a125 100644 --- a/fs/udf/symlink.c +++ b/fs/udf/symlink.c @@ -52,7 +52,7 @@ static int udf_pc_to_char(struct super_block *sb, unsigned char *from, elen += pc->lengthComponentIdent; break; } - /* Fall through */ + fallthrough; case 2: if (tolen == 0) return -ENAMETOOLONG; diff --git a/fs/ufs/util.h b/fs/ufs/util.h index e1f1b2e868a7d55cc66b4333ad542573df5a5acc..4931bec1a01cad7f76433ca2ef1b01da3bd0ffa0 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h @@ -42,7 +42,7 @@ ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1, case UFS_ST_SUNOS: if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT) return fs32_to_cpu(sb, usb1->fs_u0.fs_sun.fs_state); - /* Fall Through - to UFS_ST_SUN */ + fallthrough; /* to UFS_ST_SUN */ case UFS_ST_SUN: return fs32_to_cpu(sb, usb3->fs_un2.fs_sun.fs_state); case UFS_ST_SUNx86: @@ -63,7 +63,7 @@ ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1, usb1->fs_u0.fs_sun.fs_state = cpu_to_fs32(sb, value); break; } - /* Fall Through - to UFS_ST_SUN */ + fallthrough; /* to UFS_ST_SUN */ case UFS_ST_SUN: usb3->fs_un2.fs_sun.fs_state = cpu_to_fs32(sb, value); break; @@ -197,7 +197,7 @@ ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode) case UFS_UID_EFT: if (inode->ui_u1.oldids.ui_suid == 0xFFFF) return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid); - /* Fall through */ + fallthrough; default: return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid); } @@ -215,7 +215,7 @@ ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value) inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value); if (value > 0xFFFF) value = 0xFFFF; - /* Fall through */ + fallthrough; default: inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value); break; @@ -231,7 +231,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode) case UFS_UID_EFT: if (inode->ui_u1.oldids.ui_sgid == 0xFFFF) return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid); - /* Fall through */ + fallthrough; default: return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid); } @@ -249,7 +249,7 @@ ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value) inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value); if (value > 0xFFFF) value = 0xFFFF; - /* Fall through */ + fallthrough; default: inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value); break; diff --git a/fs/vboxsf/utils.c b/fs/vboxsf/utils.c index 96bd160da48baf590c9f169a68a6c7212a1bc111..01805754606711d17c54311f91c11087b03fb4dd 100644 --- a/fs/vboxsf/utils.c +++ b/fs/vboxsf/utils.c @@ -226,7 +226,7 @@ int vboxsf_getattr(const struct path *path, struct kstat *kstat, break; case AT_STATX_FORCE_SYNC: sf_i->force_restat = 1; - /* fall-through */ + fallthrough; default: err = vboxsf_inode_revalidate(dentry); } diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 8623c815164a65845a7ba65fe9a98ebfb1cacba8..305d4bc07337042e31f95f7c4a2518a4dc11c022 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -653,8 +653,8 @@ xfs_attr_shortform_create( ASSERT(ifp->if_flags & XFS_IFINLINE); } xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); - hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; - hdr->count = 0; + hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data; + memset(hdr, 0, sizeof(*hdr)); hdr->totsize = cpu_to_be16(sizeof(*hdr)); 
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); } @@ -1036,8 +1036,10 @@ xfs_attr_shortform_verify( * struct xfs_attr_sf_entry has a variable length. * Check the fixed-offset parts of the structure are * within the data buffer. + * xfs_attr_sf_entry is defined with a 1-byte variable + * array at the end, so we must subtract that off. */ - if (((char *)sfep + sizeof(*sfep)) >= endp) + if (((char *)sfep + sizeof(*sfep) - 1) >= endp) return __this_address; /* Don't allow names with known bad length. */ diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 9c40d597103570d834450a9da177cd8cd4dd4635..1b0a01b06a05d13b4f3cf0193dc1034e6412b9c0 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -6226,7 +6226,7 @@ xfs_bmap_validate_extent( isrt = XFS_IS_REALTIME_INODE(ip); endfsb = irec->br_startblock + irec->br_blockcount - 1; - if (isrt) { + if (isrt && whichfork == XFS_DATA_FORK) { if (!xfs_verify_rtbno(mp, irec->br_startblock)) return __this_address; if (!xfs_verify_rtbno(mp, endfsb)) diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index f742a96a2fe1ff03eb667f019d2982fb948683c8..a6b37db55169c784b880bda8361c04602cf1394f 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -688,7 +688,7 @@ xfs_ialloc_ag_alloc( args.minalignslop = igeo->cluster_align - 1; /* Allow space for the inode btree to split. */ - args.minleft = igeo->inobt_maxlevels - 1; + args.minleft = igeo->inobt_maxlevels; if ((error = xfs_alloc_vextent(&args))) return error; @@ -736,7 +736,7 @@ xfs_ialloc_ag_alloc( /* * Allow space for the inode btree to split. */ - args.minleft = igeo->inobt_maxlevels - 1; + args.minleft = igeo->inobt_maxlevels; if ((error = xfs_alloc_vextent(&args))) return error; } diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c index e15129647e00c97143dcfd5b40cf376e3fd8d20f..b7e222befb085fb749bd728ab8559ea305498342 100644 --- a/fs/xfs/libxfs/xfs_trans_inode.c +++ b/fs/xfs/libxfs/xfs_trans_inode.c @@ -110,9 +110,9 @@ xfs_trans_log_inode( * to log the timestamps, or will clear already cleared fields in the * worst case. */ - if (inode->i_state & (I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED)) { + if (inode->i_state & I_DIRTY_TIME) { spin_lock(&inode->i_lock); - inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); + inode->i_state &= ~I_DIRTY_TIME; spin_unlock(&inode->i_lock); } diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h index c6df01a2a1585d2e16d2dd35f3cf02567ac6a228..7ad3659c5d2a9fc6edd38affbb6ab03c17fcbbac 100644 --- a/fs/xfs/libxfs/xfs_trans_space.h +++ b/fs/xfs/libxfs/xfs_trans_space.h @@ -58,7 +58,7 @@ #define XFS_IALLOC_SPACE_RES(mp) \ (M_IGEO(mp)->ialloc_blks + \ ((xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1) * \ - (M_IGEO(mp)->inobt_maxlevels - 1))) + M_IGEO(mp)->inobt_maxlevels)) /* * Space reservation values for various transactions. 
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 73cafc843cd708bbfc15c3d033a1aa5e977dcbfe..5123f82f247774cb7b2eaec9569ec6b2410395a0 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1165,7 +1165,7 @@ xfs_insert_file_space( goto out_trans_cancel; do { - error = xfs_trans_roll_inode(&tp, ip); + error = xfs_defer_finish(&tp); if (error) goto out_trans_cancel; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index c31cd3be9fb24b33301ab2505ac65251fac70666..a29f78a663ca5427cc40fdea81e4938ce483cf41 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1223,6 +1223,14 @@ __xfs_filemap_fault( return ret; } +static inline bool +xfs_is_write_fault( + struct vm_fault *vmf) +{ + return (vmf->flags & FAULT_FLAG_WRITE) && + (vmf->vma->vm_flags & VM_SHARED); +} + static vm_fault_t xfs_filemap_fault( struct vm_fault *vmf) @@ -1230,7 +1238,7 @@ xfs_filemap_fault( /* DAX can shortcut the normal fault path on write faults! */ return __xfs_filemap_fault(vmf, PE_SIZE_PTE, IS_DAX(file_inode(vmf->vma->vm_file)) && - (vmf->flags & FAULT_FLAG_WRITE)); + xfs_is_write_fault(vmf)); } static vm_fault_t @@ -1243,7 +1251,7 @@ xfs_filemap_huge_fault( /* DAX can shortcut the normal fault path on write faults! */ return __xfs_filemap_fault(vmf, pe_size, - (vmf->flags & FAULT_FLAG_WRITE)); + xfs_is_write_fault(vmf)); } static vm_fault_t diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 5c28199248626d699169fcd64fa4466d82b6fbaf..85513eeb2196682227e6c1377d2bf53ed7510ffa 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1607,12 +1607,18 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, return drm_dp_dpcd_write(aux, offset, &value, 1); } +int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, + u8 dpcd[DP_RECEIVER_CAP_SIZE]); + int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, u8 status[DP_LINK_STATUS_SIZE]); bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, u8 real_edid_checksum); +int drm_dp_read_downstream_info(struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]); int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]); int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], @@ -1628,6 +1634,12 @@ void drm_dp_set_subconnector_property(struct drm_connector *connector, const u8 *dpcd, const u8 port_cap[4]); +struct drm_dp_desc; +bool drm_dp_read_sink_count_cap(struct drm_connector *connector, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const struct drm_dp_desc *desc); +int drm_dp_read_sink_count(struct drm_dp_aux *aux); + void drm_dp_remote_aux_init(struct drm_dp_aux *aux); void drm_dp_aux_init(struct drm_dp_aux *aux); int drm_dp_aux_register(struct drm_dp_aux *aux); @@ -1686,7 +1698,8 @@ enum drm_dp_quirk { * @DP_DPCD_QUIRK_NO_SINK_COUNT: * * The device does not set SINK_COUNT to a non-zero value. - * The driver should ignore SINK_COUNT during detection. + * The driver should ignore SINK_COUNT during detection. Note that + * drm_dp_read_sink_count_cap() automatically checks for this quirk. 
*/ DP_DPCD_QUIRK_NO_SINK_COUNT, /** diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 8b9eb4db3381c45a112e039510fce5ceaf4e72f8..6ae5860d8644e29a83c23cced654223121aa2eb5 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -728,10 +728,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); - +bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]); int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); - int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h index c6bab4986a658c9f8a650fd4dfbe22fa4a00e009..fe58dbb46962ac09f980cfc3078e3167fe5643d9 100644 --- a/include/drm/drm_hdcp.h +++ b/include/drm/drm_hdcp.h @@ -29,6 +29,9 @@ /* Slave address for the HDCP registers in the receiver */ #define DRM_HDCP_DDC_ADDR 0x3A +/* Value to use at the end of the SHA-1 bytestream used for repeaters */ +#define DRM_HDCP_SHA1_TERMINATOR 0x80 + /* HDCP register offsets for HDMI/DVI devices */ #define DRM_HDCP_DDC_BKSV 0x00 #define DRM_HDCP_DDC_RI_PRIME 0x08 diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index 4fc9a43ac45a8f3c61b5542ae423747adb21056a..aafd07388eb7bf4dc852fb8ad480742bbbb64403 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -164,6 +164,8 @@ int drm_modeset_lock_all_ctx(struct drm_device *dev, * is 0, so no error checking is necessary */ #define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \ + if (!drm_drv_uses_atomic_modeset(dev)) \ + mutex_lock(&dev->mode_config.mutex); \ drm_modeset_acquire_init(&ctx, flags); \ modeset_lock_retry: \ ret = drm_modeset_lock_all_ctx(dev, &ctx); \ @@ -172,6 +174,7 @@ modeset_lock_retry: \ /** * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks + * @dev: drm device * @ctx: local modeset acquire context, will be dereferenced * @ret: local ret/err/etc variable to track error status * @@ -188,7 +191,7 @@ modeset_lock_retry: \ * to that failure. In both of these cases the code between BEGIN/END will not * be run, so the failure will reflect the inability to grab the locks. */ -#define DRM_MODESET_LOCK_ALL_END(ctx, ret) \ +#define DRM_MODESET_LOCK_ALL_END(dev, ctx, ret) \ modeset_lock_fail: \ if (ret == -EDEADLK) { \ ret = drm_modeset_backoff(&ctx); \ if (!ret) \ goto modeset_lock_retry; \ } \ drm_modeset_drop_locks(&ctx); \ - drm_modeset_acquire_fini(&ctx); + drm_modeset_acquire_fini(&ctx); \ + if (!drm_drv_uses_atomic_modeset(dev)) \ + mutex_unlock(&dev->mode_config.mutex); #endif /* DRM_MODESET_LOCK_H_ */
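With @dev now threaded through both macros, a caller pairs them like this; a minimal sketch (the work done between the two macros is illustrative):

static int example_locked_work(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
	/* runs only once every modeset lock (and, for non-atomic
	 * drivers, mode_config.mutex) has been acquired */
	ret = 0;
	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);

	return ret;
}

diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index b9780ae9dd26c140b43e806b3a59cc01afa5cc75..92436553fd6a8124f25be33a49df7c5bb4881e4b 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -33,15 +33,16 @@ struct drm_gpu_scheduler; struct drm_sched_rq; +/* These are often used as an (initial) index + * to an array, and as such should start at 0.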
+ */ enum drm_sched_priority { DRM_SCHED_PRIORITY_MIN, - DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN, DRM_SCHED_PRIORITY_NORMAL, - DRM_SCHED_PRIORITY_HIGH_SW, - DRM_SCHED_PRIORITY_HIGH_HW, + DRM_SCHED_PRIORITY_HIGH, DRM_SCHED_PRIORITY_KERNEL, - DRM_SCHED_PRIORITY_MAX, - DRM_SCHED_PRIORITY_INVALID = -1, + + DRM_SCHED_PRIORITY_COUNT, DRM_SCHED_PRIORITY_UNSET = -2 }; @@ -274,7 +275,7 @@ struct drm_gpu_scheduler { uint32_t hw_submission_limit; long timeout; const char *name; - struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX]; + struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT]; wait_queue_head_t wake_up_worker; wait_queue_head_t job_scheduled; atomic_t hw_rq_count; diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 96e408b4bdc9f482bd23f673905a7dd7022e8de5..8e7ae30ebcbb71592ec4d9bb8abf89b2dadcdd2b 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -258,9 +258,7 @@ INTEL_VGA_DEVICE(0x0f30, info), \ INTEL_VGA_DEVICE(0x0f31, info), \ INTEL_VGA_DEVICE(0x0f32, info), \ - INTEL_VGA_DEVICE(0x0f33, info), \ - INTEL_VGA_DEVICE(0x0157, info), \ - INTEL_VGA_DEVICE(0x0155, info) + INTEL_VGA_DEVICE(0x0f33, info) #define INTEL_BDW_ULT_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \ diff --git a/include/linux/bvec.h b/include/linux/bvec.h index ac0c7299d5b8a2aa203037cd9fbc825bf6c29e0c..dd74503f7e5ea997c9075c6fc1097e776af9642b 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -117,11 +117,18 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv, return true; } +static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter) +{ + iter->bi_bvec_done = 0; + iter->bi_idx++; +} + #define for_each_bvec(bvl, bio_vec, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ - bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) + (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \ (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter))) /* for iterating one bio from start to end */ #define BVEC_ITER_ALL_INIT (struct bvec_iter) \ diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index fcd84e8d88f44b073a14fc1de8478df2c3807a28..999636d53cf2703f75a27d50f9a3b46d2b3a3c74 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -11,14 +11,14 @@ #define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL #define DEFINE_CEPH_FEATURE(bit, incarnation, name) \ - static const uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \ - static const uint64_t CEPH_FEATUREMASK_##name = \ + static const uint64_t __maybe_unused CEPH_FEATURE_##name = (1ULL<<bit); \ + static const uint64_t __maybe_unused CEPH_FEATUREMASK_##name = \ (1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation); /* this bit is ignored but still advertised by release *when* */ #define DEFINE_CEPH_FEATURE_DEPRECATED(bit, incarnation, name, when) \ - static const uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \ - static const uint64_t MARK_CEPH_FEATURE_DEPRECATED_##name = \ + static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \ + static const uint64_t __maybe_unused MARK_CEPH_FEATURE_DEPRECATED_##name = \ (1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation); diff --git a/include/linux/compat.h b/include/linux/compat.h --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ ... @@ static inline int put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, unsigned int size) switch (_NSIG_WORDS) { case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; - /* fall through */ + fallthrough; case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2]; - /* fall through */ + fallthrough; case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1]; - /* fall through */ + fallthrough; case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0]; } return copy_to_user(compat, &v, size) ? -EFAULT : 0; diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 6122efdad6adf3342f41e2d2c98a9ed2e29c2406..ea7b756b1c8f18d94576a3224c1f0a9c40ad8861 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -22,14 +22,8 @@ /* * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17. - * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute + * In the meantime, to support gcc < 5, we implement __has_attribute * by hand.
- * - * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__ - * depending on the compiler used to build it; however, these attributes have - * no semantic effects for sparse, so it does not matter. Also note that, - * in order to avoid sparse's warnings, even the unsupported ones must be - * defined to 0. */ #ifndef __has_attribute # define __has_attribute(x) __GCC4_has_attribute_##x diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 4b33cb385f96537a918925100f646fd5ebe3ffbc..6e390d58a9f8c5f25406fbc87a48e2fc99415629 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -11,8 +11,8 @@ # define __iomem __attribute__((noderef, address_space(__iomem))) # define __percpu __attribute__((noderef, address_space(__percpu))) # define __rcu __attribute__((noderef, address_space(__rcu))) -extern void __chk_user_ptr(const volatile void __user *); -extern void __chk_io_ptr(const volatile void __iomem *); +static inline void __chk_user_ptr(const volatile void __user *ptr) { } +static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } /* context/locking */ # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 8f141d4c859ce20cf8f6068148d8c78d4e7fb4c4..a911e5d06845497f3e72ee5f7dfa9ec262e4679b 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -956,8 +956,8 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, case CPUFREQ_RELATION_C: return cpufreq_table_find_index_c(policy, target_freq); default: - pr_err("%s: Invalid relation: %d\n", __func__, relation); - return -EINVAL; + WARN_ON_ONCE(1); + return 0; } } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index a2710e654b642bf78c367d334faf1eb9a0f24e30..3215023d485230d804c0a21ea93152df3215c6e0 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -132,6 +132,7 @@ enum cpuhp_state { CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_ARC_TIMER_STARTING, CPUHP_AP_RISCV_TIMER_STARTING, + CPUHP_AP_CLINT_TIMER_STARTING, CPUHP_AP_CSKY_TIMER_STARTING, CPUHP_AP_HYPERV_TIMER_STARTING, CPUHP_AP_KVM_STARTING, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index b65909ae4e201131a39367fdb593296bef568aca..75895e6363b89ac79c6a583f16451da818a2a933 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -75,12 +75,13 @@ struct cpuidle_state { }; /* Idle State Flags */ -#define CPUIDLE_FLAG_NONE (0x00) -#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */ -#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */ -#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */ -#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */ -#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */ +#define CPUIDLE_FLAG_NONE (0x00) +#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */ +#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */ +#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */ +#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */ +#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */ +#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */ struct cpuidle_device_kobj; struct cpuidle_state_kobj; diff --git a/include/linux/device.h b/include/linux/device.h index 
ca18da4768e3e89f3de9b43a5b7d72988d14dcf6..9e6ea8931a52eb4f9d61a5d65fae1af3eac26f6d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -454,6 +454,7 @@ struct dev_links_info { * @pm_domain: Provide callbacks that are executed during system suspend, * hibernation, system resume and during runtime PM transitions * along with subsystem-level and driver-level callbacks. + * @em_pd: device's energy model performance domain * @pins: For device pin management. * See Documentation/driver-api/pinctl.rst for details. * @msi_list: Hosts MSI descriptors diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 5a3ce2a2479437a2c9319aaecc15f9a2738c284b..6e87225600ae35230501dea46cb9e7456dbc0ea0 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -73,9 +73,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size, } u64 dma_direct_get_required_mask(struct device *dev); -gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, - u64 *phys_mask); -bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size); void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 016b96b384bdda9d57a66240ecf0510140624cf3..52635e91143b25a1518333acc981a459c9990313 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -522,8 +522,9 @@ void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot, const void *caller); void dma_common_free_remap(void *cpu_addr, size_t size); -void *dma_alloc_from_pool(struct device *dev, size_t size, - struct page **ret_page, gfp_t flags); +struct page *dma_alloc_from_pool(struct device *dev, size_t size, + void **cpu_addr, gfp_t flags, + bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t)); bool dma_free_from_pool(struct device *dev, void *start, size_t size); int diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index aa9ff9e1c0b3280ce1e362852a48b10b006b98c7..8aa0c7c2608cbb0c79637018913802e5d4f48b93 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -49,6 +49,10 @@ struct _ddebug { #if defined(CONFIG_DYNAMIC_DEBUG_CORE) + +/* exported for module authors to exercise >control */ +int dynamic_debug_exec_queries(const char *query, const char *modname); + int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *modname); extern int ddebug_remove_module(const char *mod_name); @@ -105,7 +109,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, static_branch_unlikely(&descriptor.key.dd_key_false) #endif -#else /* !HAVE_JUMP_LABEL */ +#else /* !CONFIG_JUMP_LABEL */ #define _DPRINTK_KEY_INIT @@ -117,7 +121,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) #endif -#endif +#endif /* CONFIG_JUMP_LABEL */ #define __dynamic_func_call(id, fmt, func, ...) 
do { \ DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \ @@ -172,10 +176,11 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, KERN_DEBUG, prefix_str, prefix_type, \ rowsize, groupsize, buf, len, ascii) -#else +#else /* !CONFIG_DYNAMIC_DEBUG_CORE */ #include <linux/string.h> #include <linux/errno.h> +#include <linux/printk.h> static inline int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *modname) @@ -210,6 +215,13 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val, print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \ rowsize, groupsize, buf, len, ascii); \ } while (0) -#endif + +static inline int dynamic_debug_exec_queries(const char *query, const char *modname) +{ + pr_warn("kernel not built with CONFIG_DYNAMIC_DEBUG_CORE\n"); + return 0; +} + +#endif /* !CONFIG_DYNAMIC_DEBUG_CORE */ #endif
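The new dynamic_debug_exec_queries() export lets a module apply >control-style queries from code rather than via debugfs. A sketch of a call, assuming the usual dyndbg query syntax; the function name inside the query is illustrative:

static int example_enable_debug(void)
{
	/* turn on the pr_debug()s in one function of this module */
	int err = dynamic_debug_exec_queries("func my_probe +p", KBUILD_MODNAME);

	return err < 0 ? err : 0;
}

diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h index 57eac5241303af5a66c3f39746f09dbbf3b477c7..a97a12bb2c9ef114722423d5722959a9669c5985 100644 --- a/include/linux/efi_embedded_fw.h +++ b/include/linux/efi_embedded_fw.h @@ -8,8 +8,8 @@ #define EFI_EMBEDDED_FW_PREFIX_LEN 8 /* - * This struct and efi_embedded_fw_list are private to the efi-embedded fw - * implementation they are in this header for use by lib/test_firmware.c only! + * This struct is private to the efi-embedded fw implementation. + * They are in this header for use by lib/test_firmware.c only! */ struct efi_embedded_fw { struct list_head list; @@ -18,8 +18,6 @@ struct efi_embedded_fw { size_t length; }; -extern struct list_head efi_embedded_fw_list; - /** * struct efi_embedded_fw_desc - This struct is used by the EFI embedded-fw * code to search for embedded firmwares. diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index efebbffcd5cc971dfc5197e32eea6bbdd991dc2c..159c7476b11b4852b84a095f62bffbe07ecab0d0 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -110,15 +110,30 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs #endif /** - * syscall_enter_from_user_mode - Check and handle work before invoking - * a syscall + * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts * @regs: Pointer to current's pt_regs - * @syscall: The syscall number * * Invoked from architecture specific syscall entry code with interrupts * disabled. The calling code has to be non-instrumentable. When the - * function returns all state is correct and the subsequent functions can be - * instrumented. + * function returns all state is correct, interrupts are enabled and the + * subsequent functions can be instrumented. + * + * This handles lockdep, RCU (context tracking) and tracing state. + * + * This is invoked when there is extra architecture specific functionality + * to be done between establishing state and handling user mode entry work. + */ +void syscall_enter_from_user_mode_prepare(struct pt_regs *regs); + +/** + * syscall_enter_from_user_mode_work - Check and handle work before invoking + * a syscall + * @regs: Pointer to current's pt_regs + * @syscall: The syscall number + * + * Invoked from architecture specific syscall entry code with interrupts + * enabled after invoking syscall_enter_from_user_mode_prepare() and extra + * architecture specific work. * * Returns: The original or a modified syscall number * @@ -127,12 +142,30 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs * syscall_set_return_value() first.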
If neither of those are called and -1 * is returned, then the syscall will fail with ENOSYS. * - * The following functionality is handled here: + * It handles the following work items: * - * 1) Establish state (lockdep, RCU (context tracking), tracing) - * 2) TIF flag dependent invocations of arch_syscall_enter_tracehook(), + * 1) TIF flag dependent invocations of arch_syscall_enter_tracehook(), * __secure_computing(), trace_sys_enter() - * 3) Invocation of audit_syscall_entry() + * 2) Invocation of audit_syscall_entry() + */ +long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall); + +/** + * syscall_enter_from_user_mode - Establish state and check and handle work + * before invoking a syscall + * @regs: Pointer to current's pt_regs + * @syscall: The syscall number + * + * Invoked from architecture specific syscall entry code with interrupts + * disabled. The calling code has to be non-instrumentable. When the + * function returns all state is correct, interrupts are enabled and the + * subsequent functions can be instrumented. + * + * This is a combination of syscall_enter_from_user_mode_prepare() and + * syscall_enter_from_user_mode_work(). + * + * Returns: The original or a modified syscall number. See + * syscall_enter_from_user_mode_work() for further explanation. */ long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
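The kernel-doc above describes a two-step entry protocol, so an architecture can slot extra work between establishing state and the generic entry work. A rough sketch of the intended shape; the arch hook names here are hypothetical, not from this diff:

noinstr void arch_syscall_entry(struct pt_regs *regs, long nr)
{
	/* establishes lockdep/RCU/tracing state, then enables IRQs */
	syscall_enter_from_user_mode_prepare(regs);

	arch_adjust_abi(regs);		/* hypothetical arch-specific step */

	/* generic entry work: tracehooks, seccomp, audit */
	nr = syscall_enter_from_user_mode_work(regs, nr);
	arch_invoke_syscall(regs, nr);	/* hypothetical dispatch */
}

diff --git a/include/linux/filter.h b/include/linux/filter.h index 0a355b005bf458dadcec078d3ca2a94e75054f0c..ebfb7cfb65f10bbfd00e6bf63712e9e4720f351a 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1200,7 +1200,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest) BPF_ANCILLARY(RANDOM); BPF_ANCILLARY(VLAN_TPID); } - /* Fallthrough. */ + fallthrough; default: return ftest->code; } diff --git a/include/linux/fs.h b/include/linux/fs.h index e019ea2f1347e6ebddea6cfeab01047c6af00660..7519ae003a082cfd3d87d7628a91bd8f3b3a9fda 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2132,6 +2132,10 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, * * I_DONTCACHE Evict inode as soon as it is not used anymore. * + * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists. + * Used to detect that mark_inode_dirty() should not move + * inode between dirty lists. + * * Q: What is the difference between I_WILL_FREE and I_FREEING? */ #define I_DIRTY_SYNC (1 << 0) @@ -2149,12 +2153,11 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) #define I_LINKABLE (1 << 10) #define I_DIRTY_TIME (1 << 11) -#define __I_DIRTY_TIME_EXPIRED 12 -#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) #define I_WB_SWITCH (1 << 13) #define I_OVL_INUSE (1 << 14) #define I_CREATING (1 << 15) #define I_DONTCACHE (1 << 16) +#define I_SYNC_QUEUED (1 << 17) #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) diff --git a/include/linux/hid.h b/include/linux/hid.h index 875f71132b1425b494ba9eb538f29543cb857753..c7044a14200ea2744f69c589672b2c7e0fe9b6a4 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -959,34 +959,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) { * @max: maximal valid usage->code to consider later (out parameter) * @type: input event type (EV_KEY, EV_REL, ...)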
* @c: code which corresponds to this usage and type + * + * The value pointed to by @bit will be set to NULL if either @type is + * an unhandled event type, or if @c is out of range for @type. This + * can be used as an error condition. */ static inline void hid_map_usage(struct hid_input *hidinput, struct hid_usage *usage, unsigned long **bit, int *max, - __u8 type, __u16 c) + __u8 type, unsigned int c) { struct input_dev *input = hidinput->input; - - usage->type = type; - usage->code = c; + unsigned long *bmap = NULL; + unsigned int limit = 0; switch (type) { case EV_ABS: - *bit = input->absbit; - *max = ABS_MAX; + bmap = input->absbit; + limit = ABS_MAX; break; case EV_REL: - *bit = input->relbit; - *max = REL_MAX; + bmap = input->relbit; + limit = REL_MAX; break; case EV_KEY: - *bit = input->keybit; - *max = KEY_MAX; + bmap = input->keybit; + limit = KEY_MAX; break; case EV_LED: - *bit = input->ledbit; - *max = LED_MAX; + bmap = input->ledbit; + limit = LED_MAX; break; } + + if (unlikely(c > limit || !bmap)) { + pr_warn_ratelimited("%s: Invalid code %d type %d\n", + input->name, c, type); + *bit = NULL; + return; + } + + usage->type = type; + usage->code = c; + *max = limit; + *bit = bmap; } /** @@ -1000,7 +1015,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput, __u8 type, __u16 c) { hid_map_usage(hidinput, usage, bit, max, type, c); - clear_bit(c, *bit); + if (*bit) + clear_bit(usage->code, *bit); } /**
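Per the new kernel-doc, callers of hid_map_usage() should treat a NULL *bit as "could not be mapped". A sketch of the checking pattern in a driver's input_mapping hook; the driver specifics are illustrative:

static int example_input_mapping(struct hid_device *hdev, struct hid_input *hi,
				 struct hid_field *field, struct hid_usage *usage,
				 unsigned long **bit, int *max)
{
	hid_map_usage(hi, usage, bit, max, EV_KEY, KEY_PROG1);
	if (!*bit)		/* unhandled type or out-of-range code */
		return -1;	/* ask hid-input to skip this usage */
	return 1;		/* mapped */
}

diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h index d03071732db4add88eadef01988c6585b23b5955..7c522fdd9ea735280a2cd04ada1507e766967ff6 100644 --- a/include/linux/i2c-algo-pca.h +++ b/include/linux/i2c-algo-pca.h @@ -53,6 +53,20 @@ #define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */ #define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */ +/** + * struct pca_i2c_bus_settings - The configured PCA i2c bus settings + * @mode: Configured i2c bus mode + * @tlow: Configured SCL LOW period + * @thi: Configured SCL HIGH period + * @clock_freq: The configured clock frequency + */ +struct pca_i2c_bus_settings { + int mode; + int tlow; + int thi; + int clock_freq; +}; + struct i2c_algo_pca_data { void *data; /* private low level data */ void (*write_byte) (void *data, int reg, int val); @@ -64,6 +78,7 @@ struct i2c_algo_pca_data { * For PCA9665, use the frequency you want here.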
*/ unsigned int i2c_clock; unsigned int chip; + struct pca_i2c_bus_settings bus_settings; }; int i2c_pca_add_bus(struct i2c_adapter *); diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index bd5c55755447c5303b7be70588fb41eb9746768d..3ed4e8771b64e78fc9d42eb68de8ea5b171ecc02 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -49,17 +49,18 @@ struct irqtrace_events { DECLARE_PER_CPU(int, hardirqs_enabled); DECLARE_PER_CPU(int, hardirq_context); - extern void trace_hardirqs_on_prepare(void); - extern void trace_hardirqs_off_finish(void); - extern void trace_hardirqs_on(void); - extern void trace_hardirqs_off(void); -# define lockdep_hardirq_context() (this_cpu_read(hardirq_context)) +extern void trace_hardirqs_on_prepare(void); +extern void trace_hardirqs_off_finish(void); +extern void trace_hardirqs_on(void); +extern void trace_hardirqs_off(void); + +# define lockdep_hardirq_context() (raw_cpu_read(hardirq_context)) # define lockdep_softirq_context(p) ((p)->softirq_context) # define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled)) # define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled) # define lockdep_hardirq_enter() \ do { \ - if (this_cpu_inc_return(hardirq_context) == 1) \ + if (__this_cpu_inc_return(hardirq_context) == 1)\ current->hardirq_threaded = 0; \ } while (0) # define lockdep_hardirq_threaded() \ @@ -68,7 +69,7 @@ do { \ } while (0) # define lockdep_hardirq_exit() \ do { \ - this_cpu_dec(hardirq_context); \ + __this_cpu_dec(hardirq_context); \ } while (0) # define lockdep_softirq_enter() \ do { \ @@ -120,17 +121,17 @@ do { \ #else # define trace_hardirqs_on_prepare() do { } while (0) # define trace_hardirqs_off_finish() do { } while (0) -# define trace_hardirqs_on() do { } while (0) -# define trace_hardirqs_off() do { } while (0) -# define lockdep_hardirq_context() 0 -# define lockdep_softirq_context(p) 0 -# define lockdep_hardirqs_enabled() 0 -# define lockdep_softirqs_enabled(p) 0 -# define lockdep_hardirq_enter() do { } while (0) -# define lockdep_hardirq_threaded() do { } while (0) -# define lockdep_hardirq_exit() do { } while (0) -# define lockdep_softirq_enter() do { } while (0) -# define lockdep_softirq_exit() do { } while (0) +# define trace_hardirqs_on() do { } while (0) +# define trace_hardirqs_off() do { } while (0) +# define lockdep_hardirq_context() 0 +# define lockdep_softirq_context(p) 0 +# define lockdep_hardirqs_enabled() 0 +# define lockdep_softirqs_enabled(p) 0 +# define lockdep_hardirq_enter() do { } while (0) +# define lockdep_hardirq_threaded() do { } while (0) +# define lockdep_hardirq_exit() do { } while (0) +# define lockdep_softirq_enter() do { } while (0) +# define lockdep_softirq_exit() do { } while (0) # define lockdep_hrtimer_enter(__hrtimer) false # define lockdep_hrtimer_exit(__context) do { } while (0) # define lockdep_posixtimer_enter() do { } while (0) @@ -181,26 +182,33 @@ do { \ * if !TRACE_IRQFLAGS. 
*/ #ifdef CONFIG_TRACE_IRQFLAGS -#define local_irq_enable() \ - do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) -#define local_irq_disable() \ - do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) + +#define local_irq_enable() \ + do { \ + trace_hardirqs_on(); \ + raw_local_irq_enable(); \ + } while (0) + +#define local_irq_disable() \ + do { \ + bool was_disabled = raw_irqs_disabled();\ + raw_local_irq_disable(); \ + if (!was_disabled) \ + trace_hardirqs_off(); \ + } while (0) + #define local_irq_save(flags) \ do { \ raw_local_irq_save(flags); \ - trace_hardirqs_off(); \ + if (!raw_irqs_disabled_flags(flags)) \ + trace_hardirqs_off(); \ } while (0) - #define local_irq_restore(flags) \ do { \ - if (raw_irqs_disabled_flags(flags)) { \ - raw_local_irq_restore(flags); \ - trace_hardirqs_off(); \ - } else { \ + if (!raw_irqs_disabled_flags(flags)) \ trace_hardirqs_on(); \ - raw_local_irq_restore(flags); \ - } \ + raw_local_irq_restore(flags); \ } while (0) #define safe_halt() \ @@ -214,10 +222,7 @@ do { \ #define local_irq_enable() do { raw_local_irq_enable(); } while (0) #define local_irq_disable() do { raw_local_irq_disable(); } while (0) -#define local_irq_save(flags) \ - do { \ - raw_local_irq_save(flags); \ - } while (0) +#define local_irq_save(flags) do { raw_local_irq_save(flags); } while (0) #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) #define safe_halt() do { raw_safe_halt(); } while (0) diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 4aaa29772bb0f1d91f2f17c005f5fc820c7648cd..08f904943ab272b7113cfb7744ea63f87a70533d 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1381,7 +1381,7 @@ extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); extern int jbd2_journal_invalidatepage(journal_t *, struct page *, unsigned int, unsigned int); -extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); +extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page); extern int jbd2_journal_stop(handle_t *); extern int jbd2_journal_flush (journal_t *); extern void jbd2_journal_lock_updates (journal_t *); diff --git a/include/linux/jhash.h b/include/linux/jhash.h index 19ddd43aee68958d64ba789c1e83d2bc1eb44f82..cfb62e9f37be090da5a20e4b99c5d3bc31fe7f7e 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -86,17 +86,17 @@ static inline u32 jhash(const void *key, u32 length, u32 initval) } /* Last block: affect all 32 bits of (c) */ switch (length) { - case 12: c += (u32)k[11]<<24; /* fall through */ - case 11: c += (u32)k[10]<<16; /* fall through */ - case 10: c += (u32)k[9]<<8; /* fall through */ - case 9: c += k[8]; /* fall through */ - case 8: b += (u32)k[7]<<24; /* fall through */ - case 7: b += (u32)k[6]<<16; /* fall through */ - case 6: b += (u32)k[5]<<8; /* fall through */ - case 5: b += k[4]; /* fall through */ - case 4: a += (u32)k[3]<<24; /* fall through */ - case 3: a += (u32)k[2]<<16; /* fall through */ - case 2: a += (u32)k[1]<<8; /* fall through */ + case 12: c += (u32)k[11]<<24; fallthrough; + case 11: c += (u32)k[10]<<16; fallthrough; + case 10: c += (u32)k[9]<<8; fallthrough; + case 9: c += k[8]; fallthrough; + case 8: b += (u32)k[7]<<24; fallthrough; + case 7: b += (u32)k[6]<<16; fallthrough; + case 6: b += (u32)k[5]<<8; fallthrough; + case 5: b += k[4]; fallthrough; + case 4: a += (u32)k[3]<<24; fallthrough; + case 3: a += (u32)k[2]<<16; fallthrough; + 
case 2: a += (u32)k[1]<<8; fallthrough; case 1: a += k[0]; __jhash_final(a, b, c); case 0: /* Nothing left to add */ @@ -132,8 +132,8 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval) /* Handle the last 3 u32's */ switch (length) { - case 3: c += k[2]; /* fall through */ - case 2: b += k[1]; /* fall through */ + case 3: c += k[2]; fallthrough; + case 2: b += k[1]; fallthrough; case 1: a += k[0]; __jhash_final(a, b, c); case 0: /* Nothing left to add */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 500def620d8f4933a46ebecb453ade196f4ea998..c25b8e41c0ea37147c5a0e4683c7a011dbd0beb8 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -186,7 +186,7 @@ * lower_32_bits - return bits 0-31 of a number * @n: the number we're accessing */ -#define lower_32_bits(n) ((u32)(n)) +#define lower_32_bits(n) ((u32)((n) & 0xffffffff)) struct completion; struct pt_regs; diff --git a/include/linux/ksm.h b/include/linux/ksm.h index e48b1e453ff5109292f7a735b7e4c73a0caf43b9..161e8164abcf535f30417593b055e3c3c5acf564 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -53,8 +53,6 @@ struct page *ksm_might_need_to_copy(struct page *page, void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); void ksm_migrate_page(struct page *newpage, struct page *oldpage); -bool reuse_ksm_page(struct page *page, - struct vm_area_struct *vma, unsigned long address); #else /* !CONFIG_KSM */ @@ -88,11 +86,6 @@ static inline void rmap_walk_ksm(struct page *page, static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) { } -static inline bool reuse_ksm_page(struct page *page, - struct vm_area_struct *vma, unsigned long address) -{ - return false; -} #endif /* CONFIG_MMU */ #endif /* !CONFIG_KSM */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a23076765b4cc26040bd6b3110f06c1d14724e7b..05e3c2fb3ef7828438c8456cd3553ecaa4561bc6 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -749,25 +749,46 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len); -#define __kvm_put_guest(kvm, gfn, offset, value, type) \ +#define __kvm_get_guest(kvm, gfn, offset, v) \ ({ \ unsigned long __addr = gfn_to_hva(kvm, gfn); \ - type __user *__uaddr = (type __user *)(__addr + offset); \ + typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \ int __ret = -EFAULT; \ \ if (!kvm_is_error_hva(__addr)) \ - __ret = put_user(value, __uaddr); \ + __ret = get_user(v, __uaddr); \ + __ret; \ +}) + +#define kvm_get_guest(kvm, gpa, v) \ +({ \ + gpa_t __gpa = gpa; \ + struct kvm *__kvm = kvm; \ + \ + __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \ + offset_in_page(__gpa), v); \ +}) + +#define __kvm_put_guest(kvm, gfn, offset, v) \ +({ \ + unsigned long __addr = gfn_to_hva(kvm, gfn); \ + typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \ + int __ret = -EFAULT; \ + \ + if (!kvm_is_error_hva(__addr)) \ + __ret = put_user(v, __uaddr); \ if (!__ret) \ mark_page_dirty(kvm, gfn); \ __ret; \ }) -#define kvm_put_guest(kvm, gpa, value, type) \ +#define kvm_put_guest(kvm, gpa, v) \ ({ \ gpa_t __gpa = gpa; \ struct kvm *__kvm = kvm; \ + \ __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \ - offset_in_page(__gpa), (value), type); \ + offset_in_page(__gpa), v); \ }) int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
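kvm_put_guest() drops its explicit type argument in favour of typeof() on the value, and gains a kvm_get_guest() counterpart. A sketch of how a call site changes; the field and variable names are illustrative:

static int example_bump_guest_counter(struct kvm *kvm, gpa_t gpa)
{
	u32 version;
	int ret;

	/* read a 32-bit guest field; the width comes from 'version' */
	ret = kvm_get_guest(kvm, gpa, version);
	if (ret)
		return ret;

	/* the old call style was: kvm_put_guest(kvm, gpa, version + 1, u32); */
	return kvm_put_guest(kvm, gpa, (u32)(version + 1));
}

diff --git a/include/linux/libata.h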
b/include/linux/libata.h index 77ccf040a128b1461f9a1762eaf3c6439eabc6eb..5f550eb27f811ad7b73c72d9a84daae3c730c900 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -421,6 +421,7 @@ enum { ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ + ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 62a382d1845bd56a2cd7c6f4398056723e39142f..6a584b3e5c74f5bedf9c8cf1695e30585335770b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -535,19 +535,27 @@ do { \ DECLARE_PER_CPU(int, hardirqs_enabled); DECLARE_PER_CPU(int, hardirq_context); +/* + * The below lockdep_assert_*() macros use raw_cpu_read() to access the above + * per-cpu variables. This is required because this_cpu_read() will potentially + * call into preempt/irq-disable and that obviously isn't right. This is also + * correct because when IRQs are enabled, it doesn't matter if we accidentally + * read the value from our previous CPU. + */ + #define lockdep_assert_irqs_enabled() \ do { \ - WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled)); \ + WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled)); \ } while (0) #define lockdep_assert_irqs_disabled() \ do { \ - WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled)); \ + WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled)); \ } while (0) #define lockdep_assert_in_irq() \ do { \ - WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context)); \ + WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context)); \ } while (0) #define lockdep_assert_preemption_enabled() \ @@ -555,7 +563,7 @@ do { \ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ debug_locks && \ (preempt_count() != 0 || \ - !this_cpu_read(hardirqs_enabled))); \ + !raw_cpu_read(hardirqs_enabled))); \ } while (0) #define lockdep_assert_preemption_disabled() \ @@ -563,7 +571,7 @@ do { \ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ debug_locks && \ (preempt_count() == 0 && \ - this_cpu_read(hardirqs_enabled))); \ + raw_cpu_read(hardirqs_enabled))); \ } while (0) #else diff --git a/include/linux/log2.h b/include/linux/log2.h index 83a4a3ca3e8a76f4a80b8740bd8f82b2d68fae59..c619ec6eff4aee113ab5a20381a9662a9df02796 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -173,7 +173,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define roundup_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n == 1) ? 1 : \ + ((n) == 1) ? 1 : \ (1UL << (ilog2((n) - 1) + 1)) \ ) : \ __roundup_pow_of_two(n) \ diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 5f5b2df06e61045ed276fa9dec99bf087477f688..e5862746751b1e72f17b0f7efe318e470a7ef79a 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -46,11 +46,10 @@ struct vmem_altmap { * wakeup is used to coordinate physical address space management (ex: * fs truncate/hole punch) vs pinned pages (ex: device dma). * - * MEMORY_DEVICE_DEVDAX: + * MEMORY_DEVICE_GENERIC: * Host memory that has similar access semantics as System RAM i.e. DMA - * coherent and supports page pinning. In contrast to - * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax - * character device. + * coherent and supports page pinning. 
This is for example used by DAX devices + * that expose memory using a character device. * * MEMORY_DEVICE_PCI_P2PDMA: * Device memory residing in a PCI BAR intended for use with Peer-to-Peer @@ -60,7 +59,7 @@ enum memory_type { /* 0 is reserved to catch uninitialized type fields */ MEMORY_DEVICE_PRIVATE = 1, MEMORY_DEVICE_FS_DAX, - MEMORY_DEVICE_DEVDAX, + MEMORY_DEVICE_GENERIC, MEMORY_DEVICE_PCI_P2PDMA, }; diff --git a/include/linux/mm.h b/include/linux/mm.h index 1983e08f590624cdc9b87e5d1ce149372f3be2f4..ca6e6a81576b932e804d64cc689fc730c85860a3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -157,11 +157,14 @@ static inline void __mm_zero_struct_page(struct page *page) switch (sizeof(struct page)) { case 80: - _pp[9] = 0; /* fallthrough */ + _pp[9] = 0; + fallthrough; case 72: - _pp[8] = 0; /* fallthrough */ + _pp[8] = 0; + fallthrough; case 64: - _pp[7] = 0; /* fallthrough */ + _pp[7] = 0; + fallthrough; case 56: _pp[6] = 0; _pp[5] = 0; @@ -321,6 +324,8 @@ extern unsigned int kobjsize(const void *objp); #if defined(CONFIG_X86) # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ +#elif defined(CONFIG_PPC) +# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ #elif defined(CONFIG_PARISC) # define VM_GROWSUP VM_ARCH_1 #elif defined(CONFIG_IA64) diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h index c51a84132d7c08000f00e1edbabf6ceb2cfd8693..03dee12d2b61ce3548ec041e1197caba175df8a3 100644 --- a/include/linux/mmu_context.h +++ b/include/linux/mmu_context.h @@ -3,10 +3,15 @@ #define _LINUX_MMU_CONTEXT_H #include <asm/mmu_context.h> +#include <asm/smp.h> /* Architectures that care about IRQ state in switch_mm can override this. */ #ifndef switch_mm_irqs_off # define switch_mm_irqs_off switch_mm #endif +#ifndef leave_mm +static inline void leave_mm(int cpu) { } +#endif + #endif diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h index 9a33f171aa822d3a484f6f42f840ad2c7d5dc760..625f491b95de87904892dc52e6e5bf5bba8a57fa 100644 --- a/include/linux/netfilter/nf_conntrack_sctp.h +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -9,6 +9,8 @@ struct ip_ct_sctp { enum sctp_conntrack state; __be32 vtag[IP_CT_DIR_MAX]; + u8 last_dir; + u8 flags; }; #endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 851425c3178f178d3c325773f2a77fa9ec6e3c40..89016d08f6a278d9a7703cc4256c277f05fa7173 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -43,8 +43,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group); int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags); int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags); +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid); static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type) { diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index aac42c28fe62d746c710370fa63c2c9aef2feae9..9b67394471e1c63462327d5cefdd7ab2701e0acf 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -58,7 +58,6 @@ struct nf_ipv6_ops { int (*output)(struct net *, struct sock *, struct sk_buff *)); int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); #if IS_MODULE(CONFIG_IPV6) - int (*br_defrag)(struct net *net, struct sk_buff *skb, u32
user); int (*br_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, @@ -117,23 +116,6 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst, #include -static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb, - u32 user) -{ -#if IS_MODULE(CONFIG_IPV6) - const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); - - if (!v6_ops) - return 1; - - return v6_ops->br_defrag(net, skb, user); -#elif IS_BUILTIN(CONFIG_IPV6) - return nf_ct_frag6_gather(net, skb, user); -#else - return 1; -#endif -} - int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index a124c21e320472b249b4e4ceed5a02d22769585b..e8cbc2e795d530099fe135a51b9e1fcd63e03386 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -117,7 +117,9 @@ static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address) * a shortcut which implies the use of the kernel's pgd, instead * of a process's */ +#ifndef pgd_offset_k #define pgd_offset_k(address) pgd_offset(&init_mm, (address)) +#endif /* * In many cases it is known that a virtual address is mapped at PMD or PTE diff --git a/include/linux/phylink.h b/include/linux/phylink.h index a8e876317e254ef3730ac993c93b99baae378a23..c36fb41a7d9036d8504a06a60982911b34dd6ed8 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -402,7 +402,8 @@ void pcs_get_state(struct phylink_pcs *pcs, * For most 10GBASE-R, there is no advertisement. */ int pcs_config(struct phylink_pcs *pcs, unsigned int mode, - phy_interface_t interface, const unsigned long *advertising); + phy_interface_t interface, const unsigned long *advertising, + bool permit_pause_to_mac); /** * pcs_an_restart() - restart 802.3z BaseX autonegotiation diff --git a/include/linux/powercap.h b/include/linux/powercap.h index 4537f57f9e42f7a1441b54f96f79128d274234cc..3d557bbcd2c712176e774b2a6d9f8c513c9fa950 100644 --- a/include/linux/powercap.h +++ b/include/linux/powercap.h @@ -44,19 +44,18 @@ struct powercap_control_type_ops { }; /** - * struct powercap_control_type- Defines a powercap control_type - * @name: name of control_type + * struct powercap_control_type - Defines a powercap control_type * @dev: device for this control_type * @idr: idr to have unique id for its child - * @root_node: Root holding power zones for this control_type + * @nr_zones: counter for number of zones of this type * @ops: Pointer to callback struct - * @node_lock: mutex for control type + * @lock: mutex for control type * @allocated: This is possible that client owns the memory * used by this structure. In this case * this flag is set to false by framework to * prevent deallocation during release process. * Otherwise this flag is set to true. - * @ctrl_inst: link to the control_type list + * @node: linked-list node * * Defines powercap control_type. This acts as a container for power * zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc. @@ -129,7 +128,7 @@ struct powercap_zone_ops { * this flag is set to false by framework to * prevent deallocation during release process. * Otherwise this flag is set to true. - * @constraint_ptr: List of constraints for this zone. + * @constraints: List of constraints for this zone. * * This defines a power zone instance. The fields of this structure are * private, and should not be used by client drivers. 
diff --git a/include/linux/sched.h b/include/linux/sched.h index 93ecd930efd316554705aecc3053aea778eec397..afe01e232935fa44c11272442b75da197bd58353 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1666,7 +1666,7 @@ extern struct task_struct *idle_task(int cpu); * * Return: 1 if @p is an idle task. 0 otherwise. */ -static inline bool is_idle_task(const struct task_struct *p) +static __always_inline bool is_idle_task(const struct task_struct *p) { return !!(p->flags & PF_IDLE); } diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 917d88edb7b9d95a86537168faae05581b4e0f52..a8ec3b6093fcbde053f49b09c4f91108bfd66e5d 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -36,6 +36,9 @@ struct user_struct { defined(CONFIG_NET) || defined(CONFIG_IO_URING) atomic_long_t locked_vm; #endif +#ifdef CONFIG_WATCH_QUEUE + atomic_t nr_watches; /* The number of watches this user currently has */ +#endif /* Miscellaneous per-user rate limit */ struct ratelimit_state ratelimit; diff --git a/include/linux/signal.h b/include/linux/signal.h index 6bb1a3f0258c27810c14e8daeac99a77beb33108..7bbc0e9cf084728ba6b28fbfb3ed736ee4a6e576 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -137,11 +137,11 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ b3 = b->sig[3]; b2 = b->sig[2]; \ r->sig[3] = op(a3, b3); \ r->sig[2] = op(a2, b2); \ - /* fall through */ \ + fallthrough; \ case 2: \ a1 = a->sig[1]; b1 = b->sig[1]; \ r->sig[1] = op(a1, b1); \ - /* fall through */ \ + fallthrough; \ case 1: \ a0 = a->sig[0]; b0 = b->sig[0]; \ r->sig[0] = op(a0, b0); \ @@ -171,9 +171,9 @@ static inline void name(sigset_t *set) \ switch (_NSIG_WORDS) { \ case 4: set->sig[3] = op(set->sig[3]); \ set->sig[2] = op(set->sig[2]); \ - /* fall through */ \ + fallthrough; \ case 2: set->sig[1] = op(set->sig[1]); \ - /* fall through */ \ + fallthrough; \ case 1: set->sig[0] = op(set->sig[0]); \ break; \ default: \ @@ -194,7 +194,7 @@ static inline void sigemptyset(sigset_t *set) memset(set, 0, sizeof(sigset_t)); break; case 2: set->sig[1] = 0; - /* fall through */ + fallthrough; case 1: set->sig[0] = 0; break; } @@ -207,7 +207,7 @@ static inline void sigfillset(sigset_t *set) memset(set, -1, sizeof(sigset_t)); break; case 2: set->sig[1] = -1; - /* fall through */ + fallthrough; case 1: set->sig[0] = -1; break; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 46881d9021241c27ca39e2cf16665cb1e18a0cfa..ed9bea924dc3e2cac674da7a4d1f7ed1fd741f9e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -71,7 +71,7 @@ * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain * TCP or UDP packets over IPv6. These are specifically * unencapsulated packets of the form IPv6|TCP or - * IPv4|UDP where the Next Header field in the IPv6 + * IPv6|UDP where the Next Header field in the IPv6 * header is either TCP or UDP. IPv6 extension headers * are not supported with this feature. 
This feature * cannot be set in features for a device with @@ -1056,7 +1056,16 @@ void kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); void skb_tx_error(struct sk_buff *skb); + +#ifdef CONFIG_TRACEPOINTS void consume_skb(struct sk_buff *skb); +#else +static inline void consume_skb(struct sk_buff *skb) +{ + return kfree_skb(skb); +} +#endif + void __consume_stateless_skb(struct sk_buff *skb); void __kfree_skb(struct sk_buff *skb); extern struct kmem_cache *skbuff_head_cache; @@ -2658,7 +2667,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) * * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) * to reduce average number of cache lines per packet. - * get_rps_cpus() for example only access one 64 bytes aligned block : + * get_rps_cpu() for example only access one 64 bytes aligned block : * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) */ #ifndef NET_SKB_PAD @@ -3745,19 +3754,19 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a, #define __it(x, op) (x -= sizeof(u##op)) #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) case 32: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 24: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 16: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 8: diffs |= __it_diff(a, b, 64); break; case 28: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 20: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 12: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 4: diffs |= __it_diff(a, b, 32); break; } diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index 49c5d29cd33c812cb4761ae4a5aad58dc1ac0bf3..cf27b080e148216905d65c8be80630e2fb624784 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -220,6 +220,9 @@ struct ti_sci_rm_core_ops { u16 *range_start, u16 *range_num); }; +#define TI_SCI_RESASG_SUBTYPE_IR_OUTPUT 0 +#define TI_SCI_RESASG_SUBTYPE_IA_VINT 0xa +#define TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT 0xd /** * struct ti_sci_rm_irq_ops: IRQ management operations * @set_irq: Set an IRQ route between the requested source @@ -556,6 +559,9 @@ u32 ti_sci_get_num_resources(struct ti_sci_resource *res); struct ti_sci_resource * devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, struct device *dev, u32 dev_id, char *of_prop); +struct ti_sci_resource * +devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, + u32 dev_id, u32 sub_type); #else /* CONFIG_TI_SCI_PROTOCOL */ @@ -609,6 +615,13 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, { return ERR_PTR(-EINVAL); } + +static inline struct ti_sci_resource * +devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, + u32 dev_id, u32 sub_type) +{ + return ERR_PTR(-EINVAL); +} #endif /* CONFIG_TI_SCI_PROTOCOL */ #endif /* __TISCI_PROTOCOL_H */
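devm_ti_sci_get_resource() is the subtype-keyed counterpart of devm_ti_sci_get_of_resource(); a sketch of a caller using one of the new subtype defines (the device and id values are illustrative):

static int example_get_vint_range(const struct ti_sci_handle *handle,
				  struct device *dev, u32 dev_id)
{
	struct ti_sci_resource *vint;

	/* look the range up by resource subtype instead of a DT property */
	vint = devm_ti_sci_get_resource(handle, dev, dev_id,
					TI_SCI_RESASG_SUBTYPE_IA_VINT);
	if (IS_ERR(vint))
		return PTR_ERR(vint);

	return 0;
}

diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 2e6ca53b9bbd5e9257902809edac87172d103176..18e75974d4e37bd76f6b31d88951a9cededaec63 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -30,6 +30,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGFAULT, PGMAJFAULT, PGLAZYFREED, PGREFILL, + PGREUSE,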
PGSTEAL_KSWAPD, PGSTEAL_DIRECT, PGSCAN_KSWAPD, diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h index adcc6a97db6123e7a19ed43e59659f4874b9b5a0..143568d64b207ebd9acd8eb3e420cef286d9f9e9 100644 --- a/include/math-emu/op-common.h +++ b/include/math-emu/op-common.h @@ -308,7 +308,7 @@ do { \ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ R##_e = X##_e; \ - /* Fall through */ \ + fallthrough; \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ @@ -319,7 +319,7 @@ do { \ \ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \ R##_e = Y##_e; \ - /* Fall through */ \ + fallthrough; \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ @@ -417,7 +417,7 @@ do { \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ R##_s = X##_s; \ - /* Fall through */ \ + fallthrough; \ \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ @@ -431,7 +431,7 @@ do { \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ R##_s = Y##_s; \ - /* Fall through */ \ + fallthrough; \ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ @@ -497,7 +497,7 @@ do { \ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ FP_SET_EXCEPTION(FP_EX_DIVZERO); \ - /* Fall through */ \ + fallthrough; \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ R##_c = FP_CLS_INF; \ diff --git a/include/net/addrconf.h b/include/net/addrconf.h index ba3f6c15ad2b5eb1c7274c410a4f18e36defdde2..18f783dcd55fa351c378859e50210bb6ccafc41f 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -97,7 +97,8 @@ bool ipv6_chk_custom_prefix(const struct in6_addr *addr, int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev); -struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr); +struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr, + struct net_device *dev); struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 91eacbdcf33d2b7d38971673e274a63835c0d271..f6abcc0bbd6e7686c24d081c72693bc15b359bad 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -59,7 +59,7 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, struct sockaddr_rxrpc *); -u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *); +bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *); int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, rxrpc_user_attach_call_t, unsigned long, gfp_t, unsigned int); diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 9205a76d967a03e4162bad03f2f4e53d6e976506..38e4094960cee8112d19bb3e60e8d8ed7f6fa4b4 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -494,7 +494,7 @@ int igmp6_event_report(struct sk_buff *skb); #ifdef CONFIG_SYSCTL int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl, 
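/*
 * Caller-side sketch for the af_rxrpc.h change above: the smoothed
 * RTT now comes back through an out parameter and the bool return
 * reports whether any RTT sample has been gathered yet. The helper
 * name and the worst-case default are illustrative assumptions.
 */
static u32 probe_call_srtt(struct socket *sock, struct rxrpc_call *call)
{
	u32 srtt_us;

	if (!rxrpc_kernel_get_srtt(sock, call, &srtt_us))
		srtt_us = UINT_MAX;	/* no samples yet: assume the worst */
	return srtt_us;
}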
void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index bf9491b77d1689092ba02213f9a524fa10cb5ef5..224d194ad29d06a57fc04f42817421a22d5721d8 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -143,6 +143,8 @@ static inline u64 nft_reg_load64(const u32 *sreg) static inline void nft_data_copy(u32 *dst, const struct nft_data *src, unsigned int len) { + if (len % NFT_REG32_SIZE) + dst[len / NFT_REG32_SIZE] = 0; memcpy(dst, src, len); } diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h index 9b1d43d671a3f6b157db24ba3c80b14893f318d0..8c18dc6d3fde5e44f2aba8f87c67096d5dd092d2 100644 --- a/include/soc/nps/common.h +++ b/include/soc/nps/common.h @@ -45,6 +45,12 @@ #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60 #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422 +#ifndef AUX_IENABLE +#define AUX_IENABLE 0x40c +#endif + +#define CTOP_AUX_IACK (0xFFFFF800 + 0x088) + #ifndef __ASSEMBLY__ /* In order to increase compilation test coverage */ diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index cc41d692ae8edfa4421221da51c528d57eb8bf8f..4c8b99ec8606d078d16d66fb6b883df81a522e03 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -746,24 +746,29 @@ TRACE_EVENT(ext4_mb_release_group_pa, ); TRACE_EVENT(ext4_discard_preallocations, - TP_PROTO(struct inode *inode), + TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed), - TP_ARGS(inode), + TP_ARGS(inode, len, needed), TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( unsigned int, len ) + __field( unsigned int, needed ) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; + __entry->len = len; + __entry->needed = needed; ), - TP_printk("dev %d,%d ino %lu", + TP_printk("dev %d,%d ino %lu len: %u needed %u", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino) + (unsigned long) __entry->ino, __entry->len, + __entry->needed) ); TRACE_EVENT(ext4_mb_discard_preallocations, @@ -1312,18 +1317,34 @@ DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load, TP_ARGS(sb, group) ); -DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load, +DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap, TP_PROTO(struct super_block *sb, unsigned long group), TP_ARGS(sb, group) ); -DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap, +TRACE_EVENT(ext4_read_block_bitmap_load, + TP_PROTO(struct super_block *sb, unsigned long group, bool prefetch), - TP_PROTO(struct super_block *sb, unsigned long group), + TP_ARGS(sb, group, prefetch), - TP_ARGS(sb, group) + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( __u32, group ) + __field( bool, prefetch ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->group = group; + __entry->prefetch = prefetch; + ), + + TP_printk("dev %d,%d group %u prefetch %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->group, __entry->prefetch) ); TRACE_EVENT(ext4_direct_IO_enter, @@ -2726,6 +2747,50 @@ TRACE_EVENT(ext4_error, __entry->function, __entry->line) ); +TRACE_EVENT(ext4_prefetch_bitmaps, + TP_PROTO(struct super_block *sb, ext4_group_t group, + ext4_group_t next, unsigned int prefetch_ios), + + TP_ARGS(sb, group, next, prefetch_ios), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( __u32, group ) + __field( __u32, next ) + __field( __u32, ios ) + ), + + 
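/*
 * Worked example for the nft_data_copy() hunk above, where
 * NFT_REG32_SIZE is 4: with len = 6, len % 4 != 0, so dst[6 / 4],
 * i.e. dst[1], is zeroed before the copy; memcpy() then fills dst[0]
 * and the low two bytes of dst[1], leaving the top two bytes zero
 * instead of stale register contents that could otherwise leak.
 * The helper below just restates the fixed logic in isolation.
 */
static inline void copy_to_regs_padded(u32 *dst, const void *src,
				       unsigned int len)
{
	if (len % NFT_REG32_SIZE)
		dst[len / NFT_REG32_SIZE] = 0;	/* pre-clear the partial register */
	memcpy(dst, src, len);
}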
TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->group = group; + __entry->next = next; + __entry->ios = prefetch_ios; + ), + + TP_printk("dev %d,%d group %u next %u ios %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->group, __entry->next, __entry->ios) +); + +TRACE_EVENT(ext4_lazy_itable_init, + TP_PROTO(struct super_block *sb, ext4_group_t group), + + TP_ARGS(sb, group), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( __u32, group ) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->group = group; + ), + + TP_printk("dev %d,%d group %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->group) +); + #endif /* _TRACE_EXT4_H */ /* This part must be outside protection */ diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 939092dbcb8bf37d9e17529a5081c0692f0f4672..5fb7520343863648160da1564621e83422985d97 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -114,6 +114,8 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" ) #if defined(CONFIG_X86) #define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } +#elif defined(CONFIG_PPC) +#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } #elif defined(CONFIG_PARISC) || defined(CONFIG_IA64) #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } #elif !defined(CONFIG_MMU) diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 059b6e45a02833881bf9f61c13ca523b16266447..c33079b986e86b463b0b13f7b74e7d28c64a1fa0 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -138,11 +138,16 @@ enum rxrpc_recvmsg_trace { }; enum rxrpc_rtt_tx_trace { + rxrpc_rtt_tx_cancel, rxrpc_rtt_tx_data, + rxrpc_rtt_tx_no_slot, rxrpc_rtt_tx_ping, }; enum rxrpc_rtt_rx_trace { + rxrpc_rtt_rx_cancel, + rxrpc_rtt_rx_lost, + rxrpc_rtt_rx_obsolete, rxrpc_rtt_rx_ping_response, rxrpc_rtt_rx_requested_ack, }; @@ -339,10 +344,15 @@ enum rxrpc_tx_point { E_(rxrpc_recvmsg_wait, "WAIT") #define rxrpc_rtt_tx_traces \ + EM(rxrpc_rtt_tx_cancel, "CNCE") \ EM(rxrpc_rtt_tx_data, "DATA") \ + EM(rxrpc_rtt_tx_no_slot, "FULL") \ E_(rxrpc_rtt_tx_ping, "PING") #define rxrpc_rtt_rx_traces \ + EM(rxrpc_rtt_rx_cancel, "CNCL") \ + EM(rxrpc_rtt_rx_obsolete, "OBSL") \ + EM(rxrpc_rtt_rx_lost, "LOST") \ EM(rxrpc_rtt_rx_ping_response, "PONG") \ E_(rxrpc_rtt_rx_requested_ack, "RACK") @@ -1087,38 +1097,43 @@ TRACE_EVENT(rxrpc_recvmsg, TRACE_EVENT(rxrpc_rtt_tx, TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why, - rxrpc_serial_t send_serial), + int slot, rxrpc_serial_t send_serial), - TP_ARGS(call, why, send_serial), + TP_ARGS(call, why, slot, send_serial), TP_STRUCT__entry( __field(unsigned int, call ) __field(enum rxrpc_rtt_tx_trace, why ) + __field(int, slot ) __field(rxrpc_serial_t, send_serial ) ), TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; + __entry->slot = slot; __entry->send_serial = send_serial; ), - TP_printk("c=%08x %s sr=%08x", + TP_printk("c=%08x [%d] %s sr=%08x", __entry->call, + __entry->slot, __print_symbolic(__entry->why, rxrpc_rtt_tx_traces), __entry->send_serial) ); TRACE_EVENT(rxrpc_rtt_rx, TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, + int slot, rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, u32 rtt, u32 rto), - TP_ARGS(call, why, send_serial, resp_serial, rtt, rto), + TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto), TP_STRUCT__entry( __field(unsigned int, call ) __field(enum rxrpc_rtt_rx_trace, why ) + __field(int, slot ) __field(rxrpc_serial_t, send_serial ) __field(rxrpc_serial_t, resp_serial ) 
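/*
 * Anatomy of the TRACE_EVENT()s added in the ext4 hunks above, shown
 * as a hypothetical minimal tracepoint (myfs_prefetch is a made-up
 * name): TP_PROTO declares the C prototype, TP_ARGS names the
 * arguments, TP_STRUCT__entry lays out the ring-buffer record,
 * TP_fast_assign captures the values on the hot path, and TP_printk
 * formats them only when the trace buffer is read.
 */
TRACE_EVENT(myfs_prefetch,
	TP_PROTO(struct super_block *sb, unsigned long group),

	TP_ARGS(sb, group),

	TP_STRUCT__entry(
		__field(	dev_t,	dev	)
		__field(	__u32,	group	)
	),

	TP_fast_assign(
		__entry->dev	= sb->s_dev;
		__entry->group	= group;
	),

	TP_printk("dev %d,%d group %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->group)
);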
__field(u32, rtt ) @@ -1128,14 +1143,16 @@ TRACE_EVENT(rxrpc_rtt_rx, TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; + __entry->slot = slot; __entry->send_serial = send_serial; __entry->resp_serial = resp_serial; __entry->rtt = rtt; __entry->rto = rto; ), - TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u", + TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u rto=%u", __entry->call, + __entry->slot, __print_symbolic(__entry->why, rxrpc_rtt_rx_traces), __entry->send_serial, __entry->resp_serial, diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 10f5d1fa73476a9cb4fb5e21b24d25f3c599500b..e7cbccc7c14cc28c1bb77557583df75974e60dd7 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -20,7 +20,6 @@ {I_CLEAR, "I_CLEAR"}, \ {I_SYNC, "I_SYNC"}, \ {I_DIRTY_TIME, "I_DIRTY_TIME"}, \ - {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \ {I_REFERENCED, "I_REFERENCED"} \ ) @@ -498,8 +497,9 @@ DEFINE_WBC_EVENT(wbc_writepage); TRACE_EVENT(writeback_queue_io, TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work, + unsigned long dirtied_before, int moved), - TP_ARGS(wb, work, moved), + TP_ARGS(wb, work, dirtied_before, moved), TP_STRUCT__entry( __array(char, name, 32) __field(unsigned long, older) @@ -509,19 +509,17 @@ TRACE_EVENT(writeback_queue_io, __field(ino_t, cgroup_ino) ), TP_fast_assign( - unsigned long *older_than_this = work->older_than_this; strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); - __entry->older = older_than_this ? *older_than_this : 0; - __entry->age = older_than_this ? - (jiffies - *older_than_this) * 1000 / HZ : -1; + __entry->older = dirtied_before; + __entry->age = (jiffies - dirtied_before) * 1000 / HZ; __entry->moved = moved; __entry->reason = work->reason; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu", __entry->name, - __entry->older, /* older_than_this in jiffies */ - __entry->age, /* older_than_this in relative milliseconds */ + __entry->older, /* dirtied_before in jiffies */ + __entry->age, /* dirtied_before in relative milliseconds */ __entry->moved, __print_symbolic(__entry->reason, WB_WORK_REASON), (unsigned long)__entry->cgroup_ino diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 3218576e109d95a37d297b9171679a6cf5498767..c5ff2b275fcd7c30d6f657456ff35a784df47116 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -673,6 +673,7 @@ struct drm_amdgpu_cs_chunk_data { */ #define AMDGPU_IDS_FLAGS_FUSION 0x1 #define AMDGPU_IDS_FLAGS_PREEMPTION 0x2 +#define AMDGPU_IDS_FLAGS_TMZ 0x4 /* indicate if acceleration can be working */ #define AMDGPU_INFO_ACCEL_WORKING 0x00 diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 00546062e0235d9b445c5288573e3bd1854aa04a..fa1f3d62f9a6cebb300c63bf82955af91ab2a6ed 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -619,6 +619,12 @@ typedef struct drm_i915_irq_wait { */ #define I915_PARAM_PERF_REVISION 54 +/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of + * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See + * I915_EXEC_USE_EXTENSIONS. 
+ */ +#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55 + /* Must be kept compact -- no holes and well documented */ typedef struct drm_i915_getparam { @@ -1046,6 +1052,38 @@ struct drm_i915_gem_exec_fence { __u32 flags; }; +/** + * See drm_i915_gem_execbuffer_ext_timeline_fences. + */ +#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0 + +/** + * This structure describes an array of drm_syncobj and associated points for + * timeline variants of drm_syncobj. It is invalid to append this structure to + * the execbuf if I915_EXEC_FENCE_ARRAY is set. + */ +struct drm_i915_gem_execbuffer_ext_timeline_fences { + struct i915_user_extension base; + + /** + * Number of elements in the handles_ptr & values_ptr arrays. + */ + __u64 fence_count; + + /** + * Pointer to an array of struct drm_i915_gem_exec_fence of length + * fence_count. + */ + __u64 handles_ptr; + + /** + * Pointer to an array of u64 values of length fence_count. Values + * must be 0 for a binary drm_syncobj. A value of 0 for a timeline + * drm_syncobj is invalid as it turns a drm_syncobj into a binary one. + */ + __u64 values_ptr; +}; + struct drm_i915_gem_execbuffer2 { /** * List of gem_exec_object2 structs @@ -1062,8 +1100,14 @@ struct drm_i915_gem_execbuffer2 { __u32 num_cliprects; /** * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY - * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a - * struct drm_i915_gem_exec_fence *fences. + * & I915_EXEC_USE_EXTENSIONS are not set. + * + * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array + * of struct drm_i915_gem_exec_fence and num_cliprects is the length + * of the array. + * + * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a + * single struct i915_user_extension and num_cliprects is 0. */ __u64 cliprects_ptr; #define I915_EXEC_RING_MASK (0x3f) @@ -1181,7 +1225,16 @@ struct drm_i915_gem_execbuffer2 { */ #define I915_EXEC_FENCE_SUBMIT (1 << 20) -#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1)) +/* + * Setting I915_EXEC_USE_EXTENSIONS implies that + * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked + * list of i915_user_extension. Each i915_user_extension node is the base of a + * larger structure. The list of supported structures is listed in the + * drm_i915_gem_execbuffer_ext enum. + */ +#define I915_EXEC_USE_EXTENSIONS (1 << 21) + +#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1)) #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) #define i915_execbuffer2_set_context_id(eb2, context) \ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 0480f893facd27779648f3331a27a5c1db537722..b6238b2209b71a3d9094b7eec1eb67130175ed62 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -767,7 +767,7 @@ union bpf_attr { * * Also, note that **bpf_trace_printk**\ () is slow, and should * only be used for debugging purposes. For this reason, a notice - * bloc (spanning several lines) is printed to kernel logs and + * block (spanning several lines) is printed to kernel logs and * states that the helper should not be used "for production use" * the first time this helper is used (or more precisely, when * **trace_printk**\ () buffers are allocated).
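/*
 * Userspace sketch for the i915 hunks above (placeholders:
 * syncobj_handle, ring; assumes libdrm's drmIoctl()): with
 * I915_EXEC_USE_EXTENSIONS set, cliprects_ptr carries a chain of
 * i915_user_extension nodes instead of cliprects or a plain fence
 * array, and each node is the base of a larger structure such as the
 * timeline-fences extension.
 */
static int submit_signalling_point(int fd, __u32 syncobj_handle,
				   unsigned int ring)
{
	struct drm_i915_gem_exec_fence fence = {
		.handle	= syncobj_handle,
		.flags	= I915_EXEC_FENCE_SIGNAL,
	};
	__u64 point = 3;	/* timeline point to signal; placeholder */
	struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
		.base.name	= DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
		.fence_count	= 1,
		.handles_ptr	= (__u64)(uintptr_t)&fence,
		.values_ptr	= (__u64)(uintptr_t)&point,
	};
	struct drm_i915_gem_execbuffer2 execbuf = {
		/* buffers_ptr/buffer_count setup elided */
		.cliprects_ptr	= (__u64)(uintptr_t)&ext,
		.num_cliprects	= 0,	/* must be 0 with extensions */
		.flags		= ring | I915_EXEC_USE_EXTENSIONS,
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}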
For passing values @@ -1033,14 +1033,14 @@ union bpf_attr { * * int ret; * struct bpf_tunnel_key key = {}; - * + * * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); * if (ret < 0) * return TC_ACT_SHOT; // drop packet - * + * * if (key.remote_ipv4 != 0x0a000001) * return TC_ACT_SHOT; // drop packet - * + * * return TC_ACT_OK; // accept packet * * This interface can also be used with all encapsulation devices @@ -1147,7 +1147,7 @@ union bpf_attr { * Description * Retrieve the realm or the route, that is to say the * **tclassid** field of the destination for the *skb*. The - * indentifier retrieved is a user-provided tag, similar to the + * identifier retrieved is a user-provided tag, similar to the * one used with the net_cls cgroup (see description for * **bpf_get_cgroup_classid**\ () helper), but here this tag is * held by a route (a destination entry), not by a task. diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 3d0d8231dc196891ecee80f63326d9d7db8b0125..7d6687618d80802ce10da1aa4e326b55bd9af014 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -135,7 +135,7 @@ struct in_addr { * this socket to prevent accepting spoofed ones. */ #define IP_PMTUDISC_INTERFACE 4 -/* weaker version of IP_PMTUDISC_INTERFACE, which allos packets to get +/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get * fragmented if they exeed the interface mtu */ #define IP_PMTUDISC_OMIT 5 diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index f738c3b53f4eaba4a58f8427912f7f3221b321eb..695b606da4b15b58704c68de601cdbc178b5ca2f 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -449,8 +449,15 @@ struct kfd_ioctl_import_dmabuf_args { /* * KFD SMI(System Management Interface) events */ -/* Event type (defined by bitmask) */ -#define KFD_SMI_EVENT_VMFAULT 0x0000000000000001 +enum kfd_smi_event { + KFD_SMI_EVENT_NONE = 0, /* not used */ + KFD_SMI_EVENT_VMFAULT = 1, /* event start counting at 1 */ + KFD_SMI_EVENT_THERMAL_THROTTLE = 2, + KFD_SMI_EVENT_GPU_PRE_RESET = 3, + KFD_SMI_EVENT_GPU_POST_RESET = 4, +}; + +#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1)) struct kfd_ioctl_smi_events_args { __u32 gpuid; /* to KFD */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index f6d86033c4fa3d21cc7c643c85d443b053c704c9..7d8eced6f459b065c445ba8dcbc107f785dc24fa 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -790,9 +790,10 @@ struct kvm_ppc_resize_hpt { #define KVM_VM_PPC_HV 1 #define KVM_VM_PPC_PR 2 -/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */ -#define KVM_VM_MIPS_TE 0 +/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */ +#define KVM_VM_MIPS_AUTO 0 #define KVM_VM_MIPS_VZ 1 +#define KVM_VM_MIPS_TE 2 #define KVM_S390_SIE_PAGE_OFFSET 1 @@ -1035,6 +1036,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_LAST_CPU 184 #define KVM_CAP_SMALLER_MAXPHYADDR 185 #define KVM_CAP_S390_DIAG318 186 +#define KVM_CAP_STEAL_TIME 187 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 42f351c1f5c5333c43a4abde25cb8c43ba277a5f..2b8e12f7a4a6523a8a27a4212e3a09b5286e156f 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -133,7 +133,7 @@ enum nf_tables_msg_types { * @NFTA_LIST_ELEM: list element (NLA_NESTED) */ enum nft_list_attributes { - NFTA_LIST_UNPEC, + NFTA_LIST_UNSPEC, NFTA_LIST_ELEM, __NFTA_LIST_MAX }; diff --git 
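/*
 * Worked example for the kfd_ioctl.h hunk above: events now count
 * from 1 and KFD_SMI_EVENT_MASK_FROM_INDEX(i) == 1ULL << (i - 1), so
 * the old KFD_SMI_EVENT_VMFAULT bitmask value (0x1) is preserved:
 * VMFAULT (1) -> 0x1, THERMAL_THROTTLE (2) -> 0x2,
 * GPU_PRE_RESET (3) -> 0x4, GPU_POST_RESET (4) -> 0x8.
 */
__u64 watched = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT) |
		KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_GPU_PRE_RESET);
/* watched == 0x1 | 0x4 == 0x5 */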
a/include/xen/arm/page.h b/include/xen/arm/page.h index d7f6af50e200bf34f9d1211376eb92a946f620fb..39df751d0dc43b12fa3299af12ec705f2b560965 100644 --- a/include/xen/arm/page.h +++ b/include/xen/arm/page.h @@ -76,7 +76,11 @@ static inline unsigned long bfn_to_pfn(unsigned long bfn) #define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn) /* VIRT <-> GUEST conversion */ -#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT)) +#define virt_to_gfn(v) \ + ({ \ + WARN_ON_ONCE(!virt_addr_valid(v)); \ + pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT); \ + }) #define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT)) /* Only used in PV code. But ARM guests are always HVM. */ diff --git a/include/xen/balloon.h b/include/xen/balloon.h index 6fb95aa194051fcea50416860971f9ed0d260ab1..6dbdb0b3fd03da472d017c413f5481fb977f88bc 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h @@ -2,6 +2,8 @@ /****************************************************************************** * Xen balloon functionality */ +#ifndef _XEN_BALLOON_H +#define _XEN_BALLOON_H #define RETRY_UNLIMITED 0 @@ -34,3 +36,5 @@ static inline void xen_balloon_init(void) { } #endif + +#endif /* _XEN_BALLOON_H */ diff --git a/include/xen/xen.h b/include/xen/xen.h index 19a72f591e2bdc86bd0edc99a685def4157b2728..43efba045acc7e2a942e049d4b52b4c97244dc8e 100644 --- a/include/xen/xen.h +++ b/include/xen/xen.h @@ -52,4 +52,13 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, extern u64 xen_saved_max_mem_size; #endif +#ifdef CONFIG_XEN_UNPOPULATED_ALLOC +int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages); +void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages); +#else +#define xen_alloc_unpopulated_pages alloc_xenballooned_pages +#define xen_free_unpopulated_pages free_xenballooned_pages +#include <xen/balloon.h> +#endif + #endif /* _XEN_XEN_H */ diff --git a/init/initramfs.c b/init/initramfs.c index e6dbfb767057df7531fc6528899ec854d6bc77a2..1f97c0328a7ae7b7eaedb2ab0daa4d4e2c29354f 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -297,7 +297,7 @@ static void __init clean_path(char *path, umode_t fmode) { struct kstat st; - if (init_stat(path, &st, AT_SYMLINK_NOFOLLOW) && + if (!init_stat(path, &st, AT_SYMLINK_NOFOLLOW) && (st.mode ^ fmode) & S_IFMT) { if (S_ISDIR(st.mode)) init_rmdir(path); diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c index d1b8644bfb884c82dbd9f37a715ef25afe503aba..3f312bf2b11638f18cf8117d3ead4ae4a4533690 100644 --- a/ipc/ipc_sysctl.c +++ b/ipc/ipc_sysctl.c @@ -85,7 +85,7 @@ static int proc_ipc_auto_msgmni(struct ctl_table *table, int write, } static int proc_ipc_sem_dointvec(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) + void *buffer, size_t *lenp, loff_t *ppos) { int ret, semmni; struct ipc_namespace *ns = current->nsproxy->ipc_ns; diff --git a/ipc/sem.c b/ipc/sem.c index 8c0244e0365eb65aa9270d7b5e3a510a70683337..f6c30a85dadf9c0a8a20addb2e1129dfea0ac531 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -1691,7 +1691,7 @@ static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int v case IPC_SET: if (copy_semid_from_user(&semid64, p, version)) return -EFAULT; - /* fall through */ + fallthrough; case IPC_RMID: return semctl_down(ns, semid, cmd, &semid64); default: @@ -1805,7 +1805,7 @@ static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int vers case IPC_SET: if (copy_compat_semid_from_user(&semid64, p, version)) return -EFAULT; - /* fallthru */ + fallthrough; case IPC_RMID: return
semctl_down(ns, semid, cmd, &semid64); default: diff --git a/ipc/shm.c b/ipc/shm.c index f1ed36e3ac9fe3695cd23e0df5a483dfec3fdae9..e25c7c6106bcf79ee9dc18ecc133d3531ef2aecb 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -1179,7 +1179,7 @@ static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int ver case IPC_SET: if (copy_shmid_from_user(&sem64, buf, version)) return -EFAULT; - /* fallthru */ + fallthrough; case IPC_RMID: return shmctl_down(ns, shmid, cmd, &sem64); case SHM_LOCK: @@ -1374,7 +1374,7 @@ static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int versio case IPC_SET: if (copy_compat_shmid_from_user(&sem64, uptr, version)) return -EFAULT; - /* fallthru */ + fallthrough; case IPC_RMID: return shmctl_down(ns, shmid, cmd, &sem64); case SHM_LOCK: diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index a10e2997aa6c8afa6672737dc763377f97043edb..333b3bcfc5458fd4b5e45190c90edce17784c46e 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -681,7 +681,7 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule) data->values[i] = AUDIT_UID_UNSET; break; } - /* fall through - if set */ + fallthrough; /* if set */ default: data->values[i] = f->val; } diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index b6715964b6856199a37e9c2bcb9223700603b555..8faa2ce893968f13ccdd91d5964de81a8ba6704a 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -67,6 +67,9 @@ static void bpf_iter_done_stop(struct seq_file *seq) iter_priv->done_stop = true; } +/* maximum visited objects before bailing out */ +#define MAX_ITER_OBJECTS 1000000 + /* bpf_seq_read, a customized and simpler version for bpf iterator. * no_llseek is assumed for this file. * The following are differences from seq_read(): @@ -79,7 +82,7 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size, { struct seq_file *seq = file->private_data; size_t n, offs, copied = 0; - int err = 0; + int err = 0, num_objs = 0; void *p; mutex_lock(&seq->lock); @@ -135,6 +138,7 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size, while (1) { loff_t pos = seq->index; + num_objs++; offs = seq->count; p = seq->op->next(seq, p, &seq->index); if (pos == seq->index) { @@ -153,6 +157,15 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size, if (seq->count >= size) break; + if (num_objs >= MAX_ITER_OBJECTS) { + if (offs == 0) { + err = -EAGAIN; + seq->op->stop(seq, p); + goto done; + } + break; + } + err = seq->op->show(seq, p); if (err > 0) { bpf_iter_dec_seq_num(seq); diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 83ff127ef7ae43f95b23db4e812b60ce114d0981..e21de4f1754c2c5fcd4eefa7c1703a935ea0420e 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1794,7 +1794,7 @@ static bool cg_sockopt_is_valid_access(int off, int size, return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT; case offsetof(struct bpf_sockopt, optname): - /* fallthrough */ + fallthrough; case offsetof(struct bpf_sockopt, level): if (size != size_default) return false; diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index f1c46529929bd397b7f23230e69eddae54eb79b9..6386b7bb98f2c6c7744296d469641323ce6f8130 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -279,7 +279,7 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, break; default: bpf_warn_invalid_xdp_action(act); - /* fallthrough */ + fallthrough; case XDP_DROP: xdp_return_frame(xdpf); stats->drop++; diff --git 
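/*
 * Userspace-side sketch for the bpf_iter.c hunk above: a read() on a
 * BPF iterator fd can now fail with EAGAIN when the kernel bailed
 * out after MAX_ITER_OBJECTS objects before producing any output, so
 * the reader should simply retry. Assumes <unistd.h> and <errno.h>;
 * the helper name is illustrative.
 */
static ssize_t read_bpf_iter(int iter_fd, char *buf, size_t len)
{
	ssize_t n;

	do {
		n = read(iter_fd, buf, len);
	} while (n < 0 && errno == EAGAIN);	/* iterator gave up early, retry */

	return n;
}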
a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 4fd830a62be2d003dc66e63f91116ebfc8dbc5b2..cfed0ac44d38c64b77cd7430a350f170ded15278 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -213,11 +213,13 @@ static int stack_map_get_build_id_32(void *page_addr, phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr)); - for (i = 0; i < ehdr->e_phnum; ++i) - if (phdr[i].p_type == PT_NOTE) - return stack_map_parse_build_id(page_addr, build_id, - page_addr + phdr[i].p_offset, - phdr[i].p_filesz); + for (i = 0; i < ehdr->e_phnum; ++i) { + if (phdr[i].p_type == PT_NOTE && + !stack_map_parse_build_id(page_addr, build_id, + page_addr + phdr[i].p_offset, + phdr[i].p_filesz)) + return 0; + } return -EINVAL; } @@ -236,11 +238,13 @@ static int stack_map_get_build_id_64(void *page_addr, phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr)); - for (i = 0; i < ehdr->e_phnum; ++i) - if (phdr[i].p_type == PT_NOTE) - return stack_map_parse_build_id(page_addr, build_id, - page_addr + phdr[i].p_offset, - phdr[i].p_filesz); + for (i = 0; i < ehdr->e_phnum; ++i) { + if (phdr[i].p_type == PT_NOTE && + !stack_map_parse_build_id(page_addr, build_id, + page_addr + phdr[i].p_offset, + phdr[i].p_filesz)) + return 0; + } return -EINVAL; } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 86299a292214a863726fe1efcbc575cd599c90ae..b999e7ff2583b59b52cb864fd804c5f50b2065dd 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2029,7 +2029,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type, case BPF_PROG_TYPE_EXT: if (expected_attach_type) return -EINVAL; - /* fallthrough */ + fallthrough; default: return 0; } @@ -2634,7 +2634,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, u32 ulen = info->raw_tracepoint.tp_name_len; size_t tp_len = strlen(tp_name); - if (ulen && !ubuf) + if (!ulen ^ !ubuf) return -EINVAL; info->raw_tracepoint.tp_name_len = tp_len + 1; diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index 232df29793e962201aa4875d39b8cbcc4d295a16..99af4cea110253126e6a1c7069aef8c82cf535e6 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -29,8 +29,9 @@ static struct task_struct *task_seq_get_next(struct pid_namespace *ns, rcu_read_lock(); retry: - pid = idr_get_next(&ns->idr, tid); + pid = find_ge_pid(*tid, ns); if (pid) { + *tid = pid_nr_ns(pid, ns); task = get_pid_task(pid, PIDTYPE_PID); if (!task) { ++*tid; @@ -178,10 +179,11 @@ task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info, f = fcheck_files(curr_files, curr_fd); if (!f) continue; + if (!get_file_rcu(f)) + continue; /* set info->fd */ info->fd = curr_fd; - get_file(f); rcu_read_unlock(); return f; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ef938f17b94433ea820071fc9bea058eae6f9e48..47e74f09fa376f5995c7b5b9fc01d82dc8b33f5e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5236,7 +5236,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, off_reg == dst_reg ? 
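/*
 * Pattern behind the stackmap.c hunks above: an ELF image may carry
 * several PT_NOTE segments (e.g. .note.gnu.property ahead of
 * .note.gnu.build-id), so failing to parse one segment must not end
 * the search. try_parse_build_id() is a stand-in for
 * stack_map_parse_build_id().
 */
static int find_build_id(void *page_addr, unsigned char *build_id,
			 Elf64_Ehdr *ehdr, Elf64_Phdr *phdr)
{
	int i;

	for (i = 0; i < ehdr->e_phnum; ++i) {
		if (phdr[i].p_type != PT_NOTE)
			continue;
		if (!try_parse_build_id(page_addr, build_id,
					page_addr + phdr[i].p_offset,
					phdr[i].p_filesz))
			return 0;	/* this note segment had a build-id */
	}
	return -EINVAL;			/* no PT_NOTE segment yielded one */
}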
dst : src); return -EACCES; } - /* fall-through */ + fallthrough; default: break; } @@ -10988,7 +10988,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) default: if (!prog_extension) return -EINVAL; - /* fallthrough */ + fallthrough; case BPF_MODIFY_RETURN: case BPF_LSM_MAC: case BPF_TRACE_FENTRY: diff --git a/kernel/capability.c b/kernel/capability.c index 1444f3954d750ba685b9423e94522e0243175f90..7c59b096c98aee0a1867ee7c450c9efc99d4e345 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -93,7 +93,7 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy) break; case _LINUX_CAPABILITY_VERSION_2: warn_deprecated_v2(); - /* fall through - v3 is otherwise equivalent to v2. */ + fallthrough; /* v3 is otherwise equivalent to v2 */ case _LINUX_CAPABILITY_VERSION_3: *tocopy = _LINUX_CAPABILITY_U32S_3; break; diff --git a/kernel/compat.c b/kernel/compat.c index b8d2800bb4b719eb8ad5407823415fe1e0daa8d9..05adfd6fa8bf9dd1137e19520ea8f3b28c5434d2 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -255,11 +255,11 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat) return -EFAULT; switch (_NSIG_WORDS) { case 4: set->sig[3] = v.sig[6] | (((long)v.sig[7]) << 32 ); - /* fall through */ + fallthrough; case 3: set->sig[2] = v.sig[4] | (((long)v.sig[5]) << 32 ); - /* fall through */ + fallthrough; case 2: set->sig[1] = v.sig[2] | (((long)v.sig[3]) << 32 ); - /* fall through */ + fallthrough; case 1: set->sig[0] = v.sig[0] | (((long)v.sig[1]) << 32 ); } #else diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index a790026e42d01006d8c9107e0a6cb303ba818023..cc3c43dfec44937b3b92c70e4eaac20b30227474 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -1046,14 +1046,14 @@ int gdb_serial_stub(struct kgdb_state *ks) return DBG_PASS_EVENT; } #endif - /* Fall through */ + fallthrough; case 'C': /* Exception passing */ tmp = gdb_cmd_exception_pass(ks); if (tmp > 0) goto default_handle; if (tmp == 0) break; - /* Fall through - on tmp < 0 */ + fallthrough; /* on tmp < 0 */ case 'c': /* Continue packet */ case 's': /* Single step packet */ if (kgdb_contthread && kgdb_contthread != current) { @@ -1062,7 +1062,7 @@ int gdb_serial_stub(struct kgdb_state *ks) break; } dbg_activate_sw_breakpoints(); - /* Fall through - to default processing */ + fallthrough; /* to default processing */ default: default_handle: error = kgdb_arch_handle_exception(ks->ex_vector, diff --git a/kernel/debug/kdb/kdb_keyboard.c b/kernel/debug/kdb/kdb_keyboard.c index 750497b0003a6decd80a97051aec97347079fd93..f877a0a0d7cf2f36692433d258f663c2c25595ee 100644 --- a/kernel/debug/kdb/kdb_keyboard.c +++ b/kernel/debug/kdb/kdb_keyboard.c @@ -173,11 +173,11 @@ int kdb_get_kbd_char(void) case KT_LATIN: if (isprint(keychar)) break; /* printable characters */ - /* fall through */ + fallthrough; case KT_SPEC: if (keychar == K_ENTER) break; - /* fall through */ + fallthrough; default: return -1; /* ignore unprintables */ } diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 004c5b6c87f89a9a15abbf935c4a2f96ebb3c275..6226502ce04991d8ba1642ca19c97f37ad5df35f 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c @@ -432,7 +432,7 @@ int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size) *word = w8; break; } - /* fall through */ + fallthrough; default: diag = KDB_BADWIDTH; kdb_printf("kdb_getphysword: bad width %ld\n", (long) size); @@ -481,7 +481,7 @@ int kdb_getword(unsigned long *word, unsigned 
long addr, size_t size) *word = w8; break; } - /* fall through */ + fallthrough; default: diag = KDB_BADWIDTH; kdb_printf("kdb_getword: bad width %ld\n", (long) size); @@ -525,7 +525,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size) diag = kdb_putarea(addr, w8); break; } - /* fall through */ + fallthrough; default: diag = KDB_BADWIDTH; kdb_printf("kdb_putword: bad width %ld\n", (long) size); diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index bb0041e99659758a7cece5e857b2dfdb01a5d4cc..db6ef07aec3b37e52bcc2e920551c0769dc14bf7 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -43,7 +43,7 @@ u64 dma_direct_get_required_mask(struct device *dev) return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; } -gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, +static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, u64 *phys_limit) { u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit); @@ -68,7 +68,7 @@ gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, return 0; } -bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) +static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) { return phys_to_dma_direct(dev, phys) + size - 1 <= min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); @@ -161,8 +161,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, size = PAGE_ALIGN(size); if (dma_should_alloc_from_pool(dev, gfp, attrs)) { - ret = dma_alloc_from_pool(dev, size, &page, gfp); - if (!ret) + u64 phys_mask; + + gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, + &phys_mask); + page = dma_alloc_from_pool(dev, size, &ret, gfp, + dma_coherent_ok); + if (!page) return NULL; goto done; } diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c index 6bc74a2d51273e3d32c841844f8f0f8f2eac079d..1281c0f0442bc51018b76b2b2e86a79be65829dd 100644 --- a/kernel/dma/pool.c +++ b/kernel/dma/pool.c @@ -3,7 +3,9 @@ * Copyright (C) 2012 ARM Ltd. 
* Copyright (C) 2020 Google LLC */ +#include #include +#include #include #include #include @@ -55,11 +57,34 @@ static void dma_atomic_pool_size_add(gfp_t gfp, size_t size) pool_size_kernel += size; } +static bool cma_in_zone(gfp_t gfp) +{ + unsigned long size; + phys_addr_t end; + struct cma *cma; + + cma = dev_get_cma_area(NULL); + if (!cma) + return false; + + size = cma_get_size(cma); + if (!size) + return false; + + /* CMA can't cross zone boundaries, see cma_activate_area() */ + end = cma_get_base(cma) + size - 1; + if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA)) + return end <= DMA_BIT_MASK(zone_dma_bits); + if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32)) + return end <= DMA_BIT_MASK(32); + return true; +} + static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size, gfp_t gfp) { unsigned int order; - struct page *page; + struct page *page = NULL; void *addr; int ret = -ENOMEM; @@ -68,7 +93,11 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size, do { pool_size = 1 << (PAGE_SHIFT + order); - page = alloc_pages(gfp, order); + if (cma_in_zone(gfp)) + page = dma_alloc_from_contiguous(NULL, 1 << order, + order, false); + if (!page) + page = alloc_pages(gfp, order); } while (!page && order-- > 0); if (!page) goto out; @@ -196,93 +225,75 @@ static int __init dma_atomic_pool_init(void) } postcore_initcall(dma_atomic_pool_init); -static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev) +static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp) { - u64 phys_mask; - gfp_t gfp; - - gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, - &phys_mask); - if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA) + if (prev == NULL) { + if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32)) + return atomic_pool_dma32; + if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA)) + return atomic_pool_dma; + return atomic_pool_kernel; + } + if (prev == atomic_pool_kernel) + return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma; + if (prev == atomic_pool_dma32) return atomic_pool_dma; - if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32) - return atomic_pool_dma32; - return atomic_pool_kernel; + return NULL; } -static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool) +static struct page *__dma_alloc_from_pool(struct device *dev, size_t size, + struct gen_pool *pool, void **cpu_addr, + bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t)) { - if (bad_pool == atomic_pool_kernel) - return atomic_pool_dma32 ? 
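/*
 * Worked example for cma_in_zone() above, assuming zone_dma_bits is
 * 30 (a 1 GiB ZONE_DMA, as on some arm64 configurations): a CMA
 * area at base 0x38000000 with size 0x08000000 ends at 0x3fffffff,
 * which is <= DMA_BIT_MASK(30) == 0x3fffffff, so GFP_DMA pool
 * refills may come from CMA; one byte higher and they could not,
 * because CMA pages outside the zone are unusable for that pool.
 */
static bool cma_fits_dma_zone(struct cma *cma)
{
	phys_addr_t end = cma_get_base(cma) + cma_get_size(cma) - 1;

	return end <= DMA_BIT_MASK(30);	/* zone_dma_bits assumed to be 30 */
}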
: atomic_pool_dma; + unsigned long addr; + phys_addr_t phys; - if (bad_pool == atomic_pool_dma32) - return atomic_pool_dma; + addr = gen_pool_alloc(pool, size); + if (!addr) + return NULL; - return NULL; -} + phys = gen_pool_virt_to_phys(pool, addr); + if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) { + gen_pool_free(pool, addr, size); + return NULL; + } -static inline struct gen_pool *dma_guess_pool(struct device *dev, - struct gen_pool *bad_pool) -{ - if (bad_pool) - return dma_get_safer_pool(bad_pool); + if (gen_pool_avail(pool) < atomic_pool_size) + schedule_work(&atomic_pool_work); - return dma_guess_pool_from_device(dev); + *cpu_addr = (void *)addr; + memset(*cpu_addr, 0, size); + return pfn_to_page(__phys_to_pfn(phys)); } -void *dma_alloc_from_pool(struct device *dev, size_t size, - struct page **ret_page, gfp_t flags) +struct page *dma_alloc_from_pool(struct device *dev, size_t size, + void **cpu_addr, gfp_t gfp, + bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t)) { struct gen_pool *pool = NULL; - unsigned long val = 0; - void *ptr = NULL; - phys_addr_t phys; - - while (1) { - pool = dma_guess_pool(dev, pool); - if (!pool) { - WARN(1, "Failed to get suitable pool for %s\n", - dev_name(dev)); - break; - } - - val = gen_pool_alloc(pool, size); - if (!val) - continue; - - phys = gen_pool_virt_to_phys(pool, val); - if (dma_coherent_ok(dev, phys, size)) - break; - - gen_pool_free(pool, val, size); - val = 0; - } - - - if (val) { - *ret_page = pfn_to_page(__phys_to_pfn(phys)); - ptr = (void *)val; - memset(ptr, 0, size); + struct page *page; - if (gen_pool_avail(pool) < atomic_pool_size) - schedule_work(&atomic_pool_work); + while ((pool = dma_guess_pool(pool, gfp))) { + page = __dma_alloc_from_pool(dev, size, pool, cpu_addr, + phys_addr_ok); + if (page) + return page; } - return ptr; + WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev)); + return NULL; } bool dma_free_from_pool(struct device *dev, void *start, size_t size) { struct gen_pool *pool = NULL; - while (1) { - pool = dma_guess_pool(dev, pool); - if (!pool) - return false; - - if (gen_pool_has_addr(pool, (unsigned long)start, size)) { - gen_pool_free(pool, (unsigned long)start, size); - return true; - } + while ((pool = dma_guess_pool(pool, 0))) { + if (!gen_pool_has_addr(pool, (unsigned long)start, size)) + continue; + gen_pool_free(pool, (unsigned long)start, size); + return true; } + + return false; } diff --git a/kernel/entry/common.c b/kernel/entry/common.c index 9852e0d62d95ccd572c4fa648ee6da1063afbedd..18683598edbc642addd43961e2dc5d5e2dc95135 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -65,25 +65,49 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall, syscall_enter_audit(regs, syscall); - return ret ? : syscall; + /* The above might have changed the syscall number */ + return ret ? 
: syscall_get_nr(current, regs); } -noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall) +static __always_inline long +__syscall_enter_from_user_work(struct pt_regs *regs, long syscall) { unsigned long ti_work; - enter_from_user_mode(regs); - instrumentation_begin(); - - local_irq_enable(); ti_work = READ_ONCE(current_thread_info()->flags); if (ti_work & SYSCALL_ENTER_WORK) syscall = syscall_trace_enter(regs, syscall, ti_work); - instrumentation_end(); return syscall; } +long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall) +{ + return __syscall_enter_from_user_work(regs, syscall); +} + +noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall) +{ + long ret; + + enter_from_user_mode(regs); + + instrumentation_begin(); + local_irq_enable(); + ret = __syscall_enter_from_user_work(regs, syscall); + instrumentation_end(); + + return ret; +} + +noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs) +{ + enter_from_user_mode(regs); + instrumentation_begin(); + local_irq_enable(); + instrumentation_end(); +} + /** * exit_to_user_mode - Fixup state when exiting to user mode * diff --git a/kernel/events/core.c b/kernel/events/core.c index 5bfe8e3c6e443d8102abdbb67f39bcd6d2b4902e..7ed5248f0445eadeaac5711a25523978b47aae7e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10034,7 +10034,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, case IF_SRC_KERNELADDR: case IF_SRC_KERNEL: kernel = 1; - /* fall through */ + fallthrough; case IF_SRC_FILEADDR: case IF_SRC_FILE: diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 649fd53dc9ad047d24144e61275cfb9bd124e9d8..0e18aaf23a7b48c5d28739e78bb1b1d5b00f7978 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -205,7 +205,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, try_to_free_swap(old_page); page_vma_mapped_walk_done(&pvmw); - if (vma->vm_flags & VM_LOCKED) + if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page)) munlock_vma_page(old_page); put_page(old_page); diff --git a/kernel/fork.c b/kernel/fork.c index 4d32190861bdc6500730bf296b2f79f7a51ceacf..49677d668de4da7b5115b6a9d8caacaf643fceba 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -3014,7 +3014,7 @@ int unshare_files(struct files_struct **displaced) } int sysctl_max_threads(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) + void *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; int ret; diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index 908fdf5098c32e088e1adff65d2c4f3f2472ecad..53c67c87f141bb483e1baa7a20546dac9c997263 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -19,7 +19,9 @@ #include #include "gcov.h" -#if (__GNUC__ >= 7) +#if (__GNUC__ >= 10) +#define GCOV_COUNTERS 8 +#elif (__GNUC__ >= 7) #define GCOV_COUNTERS 9 #elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) #define GCOV_COUNTERS 10 diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index a8e14c80b405bdcf37cd37fb9b74250660513b1c..762a928e18f925074ce9e57672f79c91761d5819 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -173,7 +173,7 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags __irq_wake_thread(desc, action); - /* Fall through - to add to randomness */ + fallthrough; /* to add to randomness */ case IRQ_HANDLED: *flags |= action->flags; break; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 
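/*
 * Sketch of how an architecture might use the split syscall-entry
 * helpers added above; read_syscall_nr() and invoke_syscall() are
 * hypothetical arch hooks, not code from this diff. The prepare
 * half establishes state and enables interrupts, the work half runs
 * ptrace/seccomp and returns the (possibly rewritten) syscall
 * number, negative when the syscall should be skipped.
 */
static void arch_syscall_entry(struct pt_regs *regs)
{
	long nr;

	syscall_enter_from_user_mode_prepare(regs);
	nr = read_syscall_nr(regs);			/* arch-specific */
	nr = syscall_enter_from_user_mode_work(regs, nr);
	if (nr >= 0)
		invoke_syscall(regs, nr);		/* arch-specific */
}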
52ac5391dcc6c5067b222500ebc3f39b1bdd2139..5df903fccb6094b93690cb909afc578743d972e7 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -271,7 +271,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK_DONE: cpumask_copy(desc->irq_common_data.affinity, mask); - /* fall through */ + fallthrough; case IRQ_SET_MASK_OK_NOCOPY: irq_validate_effective_affinity(data); irq_set_thread_affinity(desc); @@ -868,7 +868,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) case IRQ_SET_MASK_OK_DONE: irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); irqd_set(&desc->irq_data, flags); - /* fall through */ + fallthrough; case IRQ_SET_MASK_OK_NOCOPY: flags = irqd_get_trigger_type(&desc->irq_data); diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 30cc217b863184989987f2fa76c76c8bc9a082b4..651a4ad6d711f82d8192d07a6b7616ef9337651f 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -380,6 +380,13 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, unsigned int cpu, bit; struct cpumap *cm; + /* + * Not required in theory, but matrix_find_best_cpu() uses + * for_each_cpu() which ignores the cpumask on UP . + */ + if (cpumask_empty(msk)) + return -EINVAL; + cpu = matrix_find_best_cpu(m, msk); if (cpu == UINT_MAX) return -ENOSPC; diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 95cb74f7329255da1bede48b22559d2e39ac76a0..4fb15fa96734b464b9f0ee53282bafabc8b90cea 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -684,12 +684,12 @@ bool kallsyms_show_value(const struct cred *cred) case 0: if (kallsyms_for_perf()) return true; - /* fallthrough */ + fallthrough; case 1: if (security_capable(cred, &init_user_ns, CAP_SYSLOG, CAP_OPT_NOAUDIT) == 0) return true; - /* fallthrough */ + fallthrough; default: return false; } diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 2fad21d345b0e4b635ae4bd4f55b216a5951d288..54b74fabf40c7bd88633b717423403add6911d8c 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3756,7 +3756,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip) skip_checks: /* we'll do an OFF -> ON transition: */ - this_cpu_write(hardirqs_enabled, 1); + __this_cpu_write(hardirqs_enabled, 1); trace->hardirq_enable_ip = ip; trace->hardirq_enable_event = ++trace->irq_events; debug_atomic_inc(hardirqs_on_events); @@ -3795,7 +3795,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip) /* * We have done an ON -> OFF transition: */ - this_cpu_write(hardirqs_enabled, 0); + __this_cpu_write(hardirqs_enabled, 0); trace->hardirq_disable_ip = ip; trace->hardirq_disable_event = ++trace->irq_events; debug_atomic_inc(hardirqs_off_events); @@ -4977,6 +4977,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, { unsigned long flags; + trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); + if (unlikely(current->lockdep_recursion)) { /* XXX allow trylock from NMI ?!? 
*/ if (lockdep_nmi() && !trylock) { @@ -5001,7 +5003,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, check_flags(flags); current->lockdep_recursion++; - trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); __lock_acquire(lock, subclass, trylock, read, check, irqs_disabled_flags(flags), nest_lock, ip, 0, 0); lockdep_recursion_finish(); @@ -5013,13 +5014,15 @@ void lock_release(struct lockdep_map *lock, unsigned long ip) { unsigned long flags; + trace_lock_release(lock, ip); + if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); + current->lockdep_recursion++; - trace_lock_release(lock, ip); if (__lock_release(lock, ip)) check_chain_key(current); lockdep_recursion_finish(); @@ -5205,8 +5208,6 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip) hlock->holdtime_stamp = now; } - trace_lock_acquired(lock, ip); - stats = get_lock_stats(hlock_class(hlock)); if (waittime) { if (hlock->read) @@ -5225,6 +5226,8 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) { unsigned long flags; + trace_lock_acquired(lock, ip); + if (unlikely(!lock_stat || !debug_locks)) return; @@ -5234,7 +5237,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion++; - trace_lock_contended(lock, ip); __lock_contended(lock, ip); lockdep_recursion_finish(); raw_local_irq_restore(flags); @@ -5245,6 +5247,8 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip) { unsigned long flags; + trace_lock_contended(lock, ip); + if (unlikely(!lock_stat || !debug_locks)) return; diff --git a/kernel/padata.c b/kernel/padata.c index 16cb894dc272b13a5501ace7d512b7ccd6c44ed6..d4d3ba6e1728a2a00b5f30d53fbbfe0a8d07829e 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -215,12 +215,13 @@ int padata_do_parallel(struct padata_shell *ps, padata->pd = pd; padata->cb_cpu = *cb_cpu; - rcu_read_unlock_bh(); - spin_lock(&padata_works_lock); padata->seq_nr = ++pd->seq_nr; pw = padata_work_alloc(); spin_unlock(&padata_works_lock); + + rcu_read_unlock_bh(); + if (pw) { padata_work_init(pw, padata_parallel_worker, padata, 0); queue_work(pinst->parallel_wq, &pw->pw_work); diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index f33769f97aca59c960d188e24a0a577eaf9c5499..e7aa57fb2fdc33a587befb1c5a2d668224961bdc 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -659,7 +659,7 @@ static void power_down(void) break; case HIBERNATION_PLATFORM: hibernation_platform_enter(); - /* Fall through */ + fallthrough; case HIBERNATION_SHUTDOWN: if (pm_power_off) kernel_power_off(); diff --git a/kernel/power/qos.c b/kernel/power/qos.c index db0bed2cae26f1d350a620637a21313466b064cf..ec7e1e85923e43f60ea3c5007cdd8035c9cc0596 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -119,7 +119,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, * and add, then see if the aggregate has changed. 
*/ plist_del(node, &c->list); - /* fall through */ + fallthrough; case PM_QOS_ADD_REQ: plist_node_init(node, new_value); plist_add(node, &c->list); @@ -188,7 +188,7 @@ bool pm_qos_update_flags(struct pm_qos_flags *pqf, break; case PM_QOS_UPDATE_REQ: pm_qos_flags_remove_req(pqf, req); - /* fall through */ + fallthrough; case PM_QOS_ADD_REQ: req->flags = val; INIT_LIST_HEAD(&req->node); diff --git a/kernel/relay.c b/kernel/relay.c index 72fe443ea78f0f7cc8f2668b356ddf4c7d9f2bd8..fb4e0c530c080992ba8eeb8d84422439acc13308 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -197,6 +197,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) static void relay_destroy_channel(struct kref *kref) { struct rchan *chan = container_of(kref, struct rchan, kref); + free_percpu(chan->buf); kfree(chan); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8471a0f7eb322844d6157e320cedc06f31ec77e5..2d95dc3f46444ece3e21feee786be746c131b512 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2320,7 +2320,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p) state = possible; break; } - /* Fall-through */ + fallthrough; case possible: do_set_cpus_allowed(p, cpu_possible_mask); state = fail; diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 6bf34986f45ce9bda76caca5e3d5744963f3056f..f324dc36fc43de36949d7a0bce547bd3add7c12b 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -54,17 +54,18 @@ __setup("hlt", cpu_idle_nopoll_setup); static noinline int __cpuidle cpu_idle_poll(void) { + trace_cpu_idle(0, smp_processor_id()); + stop_critical_timings(); rcu_idle_enter(); - trace_cpu_idle_rcuidle(0, smp_processor_id()); local_irq_enable(); - stop_critical_timings(); while (!tif_need_resched() && - (cpu_idle_force_poll || tick_check_broadcast_expired())) + (cpu_idle_force_poll || tick_check_broadcast_expired())) cpu_relax(); - start_critical_timings(); - trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); + rcu_idle_exit(); + start_critical_timings(); + trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); return 1; } @@ -90,9 +91,14 @@ void __cpuidle default_idle_call(void) if (current_clr_polling_and_test()) { local_irq_enable(); } else { + + trace_cpu_idle(1, smp_processor_id()); stop_critical_timings(); + rcu_idle_enter(); arch_cpu_idle(); + rcu_idle_exit(); start_critical_timings(); + trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); } } @@ -158,7 +164,6 @@ static void cpuidle_idle_call(void) if (cpuidle_not_available(drv, dev)) { tick_nohz_idle_stop_tick(); - rcu_idle_enter(); default_idle_call(); goto exit_idle; @@ -178,21 +183,17 @@ static void cpuidle_idle_call(void) u64 max_latency_ns; if (idle_should_enter_s2idle()) { - rcu_idle_enter(); entered_state = call_cpuidle_s2idle(drv, dev); if (entered_state > 0) goto exit_idle; - rcu_idle_exit(); - max_latency_ns = U64_MAX; } else { max_latency_ns = dev->forced_idle_latency_limit_ns; } tick_nohz_idle_stop_tick(); - rcu_idle_enter(); next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns); call_cpuidle(drv, dev, next_state); @@ -209,8 +210,6 @@ static void cpuidle_idle_call(void) else tick_nohz_idle_retain_tick(); - rcu_idle_enter(); - entered_state = call_cpuidle(drv, dev, next_state); /* * Give the governor an opportunity to reflect on the outcome @@ -226,8 +225,6 @@ static void cpuidle_idle_call(void) */ if (WARN_ON_ONCE(irqs_disabled())) local_irq_enable(); - - rcu_idle_exit(); } /* diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 
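/*
 * Ordering sketch for the sched/idle.c hunks above (a condensed view
 * of default_idle_call(), not new code): tracepoints use RCU, so
 * trace_cpu_idle() must fire before rcu_idle_enter() and after
 * rcu_idle_exit(), and the low-level idle routine runs inside the
 * RCU-idle window where RCU use is forbidden.
 */
static void idle_sequence(void)
{
	trace_cpu_idle(1, smp_processor_id());	/* RCU still watching */
	stop_critical_timings();
	rcu_idle_enter();
	arch_cpu_idle();			/* no RCU use in here */
	rcu_idle_exit();
	start_critical_timings();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
}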
007b0a6b015277967ffb9a179e671fb48230d147..1bd7e3af904f6d2724c260483808107d1898a568 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1219,13 +1219,13 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what, case sa_rootdomain: if (!atomic_read(&d->rd->refcount)) free_rootdomain(&d->rd->rcu); - /* Fall through */ + fallthrough; case sa_sd: free_percpu(d->sd); - /* Fall through */ + fallthrough; case sa_sd_storage: __sdt_free(cpu_map); - /* Fall through */ + fallthrough; case sa_none: break; } diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 3ee59ce0a323dcad450961a2de31845646f57c1f..676d4af621038afb0764f720df445c589961b986 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -1109,13 +1109,18 @@ static long seccomp_set_mode_strict(void) } #ifdef CONFIG_SECCOMP_FILTER -static int seccomp_notify_release(struct inode *inode, struct file *file) +static void seccomp_notify_free(struct seccomp_filter *filter) +{ + kfree(filter->notif); + filter->notif = NULL; +} + +static void seccomp_notify_detach(struct seccomp_filter *filter) { - struct seccomp_filter *filter = file->private_data; struct seccomp_knotif *knotif; if (!filter) - return 0; + return; mutex_lock(&filter->notify_lock); @@ -1139,9 +1144,15 @@ static int seccomp_notify_release(struct inode *inode, struct file *file) complete(&knotif->ready); } - kfree(filter->notif); - filter->notif = NULL; + seccomp_notify_free(filter); mutex_unlock(&filter->notify_lock); +} + +static int seccomp_notify_release(struct inode *inode, struct file *file) +{ + struct seccomp_filter *filter = file->private_data; + + seccomp_notify_detach(filter); __put_seccomp_filter(filter); return 0; } @@ -1488,7 +1499,7 @@ static struct file *init_listener(struct seccomp_filter *filter) out_notif: if (IS_ERR(ret)) - kfree(filter->notif); + seccomp_notify_free(filter); out: return ret; } @@ -1581,6 +1592,7 @@ static long seccomp_set_mode_filter(unsigned int flags, listener_f->private_data = NULL; fput(listener_f); put_unused_fd(listener); + seccomp_notify_detach(prepared); } else { fd_install(listener, listener_f); ret = listener; diff --git a/kernel/signal.c b/kernel/signal.c index 42b67d2cea370af1eb6b40ee76d7ff5cc094e93a..a38b3edc68511c3419a68b96d9867f000bb18d83 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -851,7 +851,7 @@ static int check_kill_permission(int sig, struct kernel_siginfo *info, */ if (!sid || sid == task_session(current)) break; - /* fall through */ + fallthrough; default: return -EPERM; } diff --git a/kernel/sys.c b/kernel/sys.c index ca11af9d815d9d2490e224920df54e77f0fbf92b..ab6c409b1159b1538437198d5a7745766f45e906 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1753,7 +1753,7 @@ void getrusage(struct task_struct *p, int who, struct rusage *r) if (who == RUSAGE_CHILDREN) break; - /* fall through */ + fallthrough; case RUSAGE_SELF: thread_group_cputime_adjusted(p, &tgutime, &tgstime); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 287862f91717a67eb4e13f11d7486dfad42fbe96..09e70ee2332ed843f9c52a7949e961c5a595e6e2 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -204,8 +204,7 @@ static int max_extfrag_threshold = 1000; #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL) static int bpf_stats_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos) + void *buffer, size_t *lenp, loff_t *ppos) { struct static_key *key = (struct static_key *)table->data; static int saved_val; diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 
c4038511d5c9e3fad13f06dd04fd7ce24f7a653a..95b6a708b0406b717c130b511d964ebcddc09a2e 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -377,7 +377,7 @@ static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state) switch (state) { case ODEBUG_STATE_ACTIVE: WARN_ON(1); - /* fall through */ + fallthrough; default: return false; } diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 07709ac30439a8e210162535fac7a4be378a0f9a..bf540f5a4115a2bff84ee7c2eb9f1af21385e1cd 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -439,12 +439,12 @@ static struct pid *good_sigevent(sigevent_t * event) rtn = pid_task(pid, PIDTYPE_PID); if (!rtn || !same_thread_group(rtn, current)) return NULL; - /* FALLTHRU */ + fallthrough; case SIGEV_SIGNAL: case SIGEV_THREAD: if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX) return NULL; - /* FALLTHRU */ + fallthrough; case SIGEV_NONE: return pid; default: diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index e51778c312f1c51e722a3aa914be71ca94c42885..36d7464c8962543d6b29fa43f3c70d28c3c41362 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -381,7 +381,7 @@ void tick_broadcast_control(enum tick_broadcast_mode mode) switch (mode) { case TICK_BROADCAST_FORCE: tick_broadcast_forced = 1; - /* fall through */ + fallthrough; case TICK_BROADCAST_ON: cpumask_set_cpu(cpu, tick_broadcast_on); if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) { diff --git a/kernel/time/timer.c b/kernel/time/timer.c index a16764b0116e32e821bb80eb01956d9972158b0f..a50364df10543912c0bfc9f6f40d4c0470de5cc5 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -666,7 +666,7 @@ static bool timer_fixup_activate(void *addr, enum debug_obj_state state) case ODEBUG_STATE_ACTIVE: WARN_ON(1); - /* fall through */ + fallthrough; default: return false; } diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7ba62d68885aec065731d22ebb267d5a26fac191..4b3a42fc3b24f12a7d70ecb17217485d9539733c 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -745,7 +745,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) #endif case BLKTRACESTART: start = 1; - /* fall through */ + fallthrough; case BLKTRACESTOP: ret = __blk_trace_startstop(q, start); break; diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index bf44f6bbd0c36c896f7110840043f9bd2befc1fd..78a678eeb140935354283529066bc75c4fd4863e 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -499,7 +499,7 @@ predicate_parse(const char *str, int nr_parens, int nr_preds, ptr++; break; } - /* fall through */ + fallthrough; default: parse_error(pe, FILT_ERR_TOO_MANY_PREDS, next - str); @@ -1273,7 +1273,7 @@ static int parse_pred(const char *str, void *data, switch (op) { case OP_NE: pred->not = 1; - /* Fall through */ + fallthrough; case OP_GLOB: case OP_EQ: break; diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index f74020f6bd9d5d87df96099b8df2a1d4ab4aa98e..0ef8f65bd2d71188bbe314b11eda366fe87ff889 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -393,6 +393,7 @@ static void free_watch(struct rcu_head *rcu) struct watch *watch = container_of(rcu, struct watch, rcu); put_watch_queue(rcu_access_pointer(watch->queue)); + atomic_dec(&watch->cred->user->nr_watches); put_cred(watch->cred); } @@ -452,6 +453,13 @@ int add_watch_to_object(struct watch *watch, struct watch_list 
*wlist) watch->cred = get_current_cred(); rcu_assign_pointer(watch->watch_list, wlist); + if (atomic_inc_return(&watch->cred->user->nr_watches) > + task_rlimit(current, RLIMIT_NOFILE)) { + atomic_dec(&watch->cred->user->nr_watches); + put_cred(watch->cred); + return -EAGAIN; + } + spin_lock_bh(&wqueue->lock); kref_get(&wqueue->usage); kref_get(&watch->usage); diff --git a/lib/Makefile b/lib/Makefile index e290fc5707ea656c97da456b91c58913477a7d3c..a4a4c6864f51814f6fbc3b90b3c59b04d71471b1 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -15,11 +15,16 @@ KCOV_INSTRUMENT_debugobjects.o := n KCOV_INSTRUMENT_dynamic_debug.o := n KCOV_INSTRUMENT_fault-inject.o := n +# string.o implements standard library functions like memset/memcpy etc. +# Use -ffreestanding to ensure that the compiler does not try to "optimize" +# them into calls to themselves. +CFLAGS_string.o := -ffreestanding + # Early boot use of cmdline, don't instrument it ifdef CONFIG_AMD_MEM_ENCRYPT KASAN_SANITIZE_string.o := n -CFLAGS_string.o := -fno-stack-protector +CFLAGS_string.o += -fno-stack-protector endif # Used by KCSAN while enabled, avoid recursion. diff --git a/lib/bootconfig.c b/lib/bootconfig.c index a5f701161f6bd13ce285729256bbbafa2882cb47..2c905a91d4ebe51d81574764b6a8c6a29cc4c693 100644 --- a/lib/bootconfig.c +++ b/lib/bootconfig.c @@ -817,7 +817,7 @@ int __init xbc_init(char *buf, const char **emsg, int *epos) q - 2); break; } - /* Fall through */ + /* fall through */ case '=': ret = xbc_parse_kv(&p, q, c); break; diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 1d012e597cc3f9753820b94a5dfc045a7747e908..2d4dfd44b0fa5fdf618dfb5c16a32e53f68e4eea 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -353,8 +353,7 @@ static int check_set(const char **dest, char *src, char *name) /* * Parse words[] as a ddebug query specification, which is a series - * of (keyword, value) pairs or combined keyword=value terms, - * chosen from these possibilities: + * of (keyword, value) pairs chosen from these possibilities: * * func * file @@ -373,34 +372,22 @@ static int ddebug_parse_query(char *words[], int nwords, unsigned int i; int rc = 0; char *fline; - char *keyword, *arg; + + /* check we have an even number of words */ + if (nwords % 2 != 0) { + pr_err("expecting pairs of match-spec \n"); + return -EINVAL; + } if (modname) /* support $modname.dyndbg= */ query->module = modname; - for (i = 0; i < nwords; i++) { - /* accept keyword=arg */ - vpr_info("%d w:%s\n", i, words[i]); - - keyword = words[i]; - arg = strchr(keyword, '='); - if (arg) { - *arg++ = '\0'; - } else { - i++; /* next word is arg */ - if (!(i < nwords)) { - pr_err("missing arg to keyword: %s\n", keyword); - return -EINVAL; - } - arg = words[i]; - } - vpr_info("%d key:%s arg:%s\n", i, keyword, arg); - - if (!strcmp(keyword, "func")) { - rc = check_set(&query->function, arg, "func"); - } else if (!strcmp(keyword, "file")) { - if (check_set(&query->filename, arg, "file")) + for (i = 0; i < nwords; i += 2) { + if (!strcmp(words[i], "func")) { + rc = check_set(&query->function, words[i+1], "func"); + } else if (!strcmp(words[i], "file")) { + if (check_set(&query->filename, words[i+1], "file")) return -EINVAL; /* tail :$info is function or line-range */ @@ -416,18 +403,18 @@ static int ddebug_parse_query(char *words[], int nwords, if (parse_linerange(query, fline)) return -EINVAL; } - } else if (!strcmp(keyword, "module")) { - rc = check_set(&query->module, arg, "module"); - } else if (!strcmp(keyword, "format")) { - string_unescape_inplace(arg, 
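/*
 * (Aside on the lib/Makefile hunk above -- a minimal sketch, not part of
 * this patch, of why string.o needs -ffreestanding: the compiler's idiom
 * recognition can turn a plain byte-copy loop into a call to memcpy()
 * itself, which is fatal when the function being compiled *is* the
 * kernel's memcpy():
 *
 *	void *memcpy(void *dest, const void *src, size_t n)
 *	{
 *		char *d = dest;
 *		const char *s = src;
 *
 *		while (n--)
 *			*d++ = *s++;
 *		return dest;
 *	}
 *
 * -ffreestanding removes the hosted-libc assumption and so disables that
 * transformation.)
 */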
UNESCAPE_SPACE | + } else if (!strcmp(words[i], "module")) { + rc = check_set(&query->module, words[i+1], "module"); + } else if (!strcmp(words[i], "format")) { + string_unescape_inplace(words[i+1], UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_SPECIAL); - rc = check_set(&query->format, arg, "format"); - } else if (!strcmp(keyword, "line")) { - if (parse_linerange(query, arg)) + rc = check_set(&query->format, words[i+1], "format"); + } else if (!strcmp(words[i], "line")) { + if (parse_linerange(query, words[i+1])) return -EINVAL; } else { - pr_err("unknown keyword \"%s\"\n", keyword); + pr_err("unknown keyword \"%s\"\n", words[i]); return -EINVAL; } if (rc) @@ -525,7 +512,7 @@ static int ddebug_exec_query(char *query_string, const char *modname) last error or number of matching callsites. Module name is either in param (for boot arg) or perhaps in query string. */ -int ddebug_exec_queries(char *query, const char *modname) +static int ddebug_exec_queries(char *query, const char *modname) { char *split; int i, errs = 0, exitcode = 0, rc, nfound = 0; @@ -557,7 +544,30 @@ int ddebug_exec_queries(char *query, const char *modname) return exitcode; return nfound; } -EXPORT_SYMBOL_GPL(ddebug_exec_queries); + +/** + * dynamic_debug_exec_queries - select and change dynamic-debug prints + * @query: query-string described in admin-guide/dynamic-debug-howto + * @modname: string containing module name, usually &module.mod_name + * + * This uses the >/proc/dynamic_debug/control reader, allowing module + * authors to modify their dynamic-debug callsites. The modname is + * canonically struct module.mod_name, but can also be null or a + * module-wildcard, for example: "drm*". + */ +int dynamic_debug_exec_queries(const char *query, const char *modname) +{ + int rc; + char *qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL); + + if (!qry) + return -ENOMEM; + + rc = ddebug_exec_queries(qry, modname); + kfree(qry); + return rc; +} +EXPORT_SYMBOL_GPL(dynamic_debug_exec_queries); #define PREFIX_SIZE 64 @@ -947,7 +957,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n, list_add(&dt->link, &ddebug_tables); mutex_unlock(&ddebug_lock); - v2pr_info("%u debug prints in module %s\n", n, dt->mod_name); + v2pr_info("%3u debug prints in module %s\n", n, dt->mod_name); return 0; } diff --git a/lib/glob.c b/lib/glob.c index 0ba3ea86b5466c102d588327dad3e7e8df092f5d..52e3ed7e4a9b8d331148a7dec6a640c43b0104ba 100644 --- a/lib/glob.c +++ b/lib/glob.c @@ -102,7 +102,7 @@ bool __pure glob_match(char const *pat, char const *str) break; case '\\': d = *pat++; - /*FALLTHROUGH*/ + /* fall through */ default: /* Literal character */ literal: if (c == d) { diff --git a/lib/kobject.c b/lib/kobject.c index 3afb939f2a1cc5a5a04a4c2da71f1f35315b5c0a..ea53b30cf4837969326dac3282e1fd364edbd3a9 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -604,9 +604,6 @@ static void __kobject_del(struct kobject *kobj) struct kernfs_node *sd; const struct kobj_type *ktype; - if (!kobj) - return; - sd = kobj->sd; ktype = get_ktype(kobj); @@ -637,8 +634,12 @@ static void __kobject_del(struct kobject *kobj) */ void kobject_del(struct kobject *kobj) { - struct kobject *parent = kobj->parent; + struct kobject *parent; + + if (!kobj) + return; + + parent = kobj->parent; __kobject_del(kobj); kobject_put(parent); } diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 9fee2b93a8d18c0d2a343ec4e3d3fd173b3c62d2..06c95505775644d44af3dde867d72131128ee07e 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -26,6 +26,8 @@ #include #include
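/*
 * (Aside on the dynamic_debug_exec_queries() export above -- a
 * hypothetical caller, for illustration only; the module name "mydrv"
 * and the query string are made up:
 *
 *	int n = dynamic_debug_exec_queries("func probe* +p", "mydrv");
 *	if (n < 0)
 *		pr_warn("dyndbg query failed: %d\n", n);
 *	else
 *		pr_info("dyndbg enabled %d callsites\n", n);
 *
 * A non-negative return is the number of matching callsites; a negative
 * value reports the parse or allocation error.)
 */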
+MODULE_IMPORT_NS(TEST_FIRMWARE); + #define TEST_FIRMWARE_NAME "test-firmware.bin" #define TEST_FIRMWARE_NUM_REQS 4 #define TEST_FIRMWARE_BUF_SIZE SZ_1K @@ -489,6 +491,9 @@ static ssize_t trigger_request_store(struct device *dev, static DEVICE_ATTR_WO(trigger_request); #ifdef CONFIG_EFI_EMBEDDED_FIRMWARE +extern struct list_head efi_embedded_fw_list; +extern bool efi_embedded_fw_checked; + static ssize_t trigger_request_platform_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -501,6 +506,7 @@ static ssize_t trigger_request_platform_store(struct device *dev, }; struct efi_embedded_fw efi_embedded_fw; const struct firmware *firmware = NULL; + bool saved_efi_embedded_fw_checked; char *name; int rc; @@ -513,6 +519,8 @@ static ssize_t trigger_request_platform_store(struct device *dev, efi_embedded_fw.data = (void *)test_data; efi_embedded_fw.length = sizeof(test_data); list_add(&efi_embedded_fw.list, &efi_embedded_fw_list); + saved_efi_embedded_fw_checked = efi_embedded_fw_checked; + efi_embedded_fw_checked = true; pr_info("loading '%s'\n", name); rc = firmware_request_platform(&firmware, name, dev); @@ -530,6 +538,7 @@ static ssize_t trigger_request_platform_store(struct device *dev, rc = count; out: + efi_embedded_fw_checked = saved_efi_embedded_fw_checked; release_firmware(firmware); list_del(&efi_embedded_fw.list); kfree(name); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index c155769559abe27064e609ee80ff60abda8122b9..afb9521ddf91975aa6ebb2e853712f50f3f94cc3 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -1681,7 +1681,8 @@ char *uuid_string(char *buf, char *end, const u8 *addr, switch (*(++fmt)) { case 'L': - uc = true; /* fall-through */ + uc = true; + /* fall through */ case 'l': index = guid_index; break; @@ -2218,7 +2219,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, case 'S': case 's': ptr = dereference_symbol_descriptor(ptr); - /* Fallthrough */ + /* fall through */ case 'B': return symbol_string(buf, end, ptr, spec, fmt); case 'R': @@ -2467,7 +2468,7 @@ int format_decode(const char *fmt, struct printf_spec *spec) * utility, treat it as any other invalid or * unsupported format specifier. */ - /* Fall-through */ + /* fall through */ default: WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt); diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c index 9f336bc07ed610f6944e6617439dd7fd0e51c9e0..65a1aad8c223b95bf442519c9aa12bfd665dccc3 100644 --- a/lib/xz/xz_dec_lzma2.c +++ b/lib/xz/xz_dec_lzma2.c @@ -1043,7 +1043,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, s->lzma2.sequence = SEQ_LZMA_PREPARE; - /* Fall through */ + /* fall through */ case SEQ_LZMA_PREPARE: if (s->lzma2.compressed < RC_INIT_BYTES) @@ -1055,7 +1055,7 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, s->lzma2.compressed -= RC_INIT_BYTES; s->lzma2.sequence = SEQ_LZMA_RUN; - /* Fall through */ + /* fall through */ case SEQ_LZMA_RUN: /* diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c index bd1d182419d7e2c8c12926ac7a69818e45f116a6..32ab2a08b7cbc9ff5a874d1ffe5cfd12d221e901 100644 --- a/lib/xz/xz_dec_stream.c +++ b/lib/xz/xz_dec_stream.c @@ -583,7 +583,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) if (ret != XZ_OK) return ret; - /* Fall through */ + /* fall through */ case SEQ_BLOCK_START: /* We need one byte of input to continue. 
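 * (Aside: dec_main() is a resumable state machine; each case finishes
 * its stage, advances s->sequence, and deliberately falls through into
 * the next case, which is why every stage boundary in this function
 * carries a fall-through annotation. Decoding can therefore stop on an
 * empty input buffer and resume later at exactly the same point.)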
*/ @@ -608,7 +608,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->temp.pos = 0; s->sequence = SEQ_BLOCK_HEADER; - /* Fall through */ + /* fall through */ case SEQ_BLOCK_HEADER: if (!fill_temp(s, b)) @@ -620,7 +620,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->sequence = SEQ_BLOCK_UNCOMPRESS; - /* Fall through */ + /* fall through */ case SEQ_BLOCK_UNCOMPRESS: ret = dec_block(s, b); @@ -629,7 +629,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->sequence = SEQ_BLOCK_PADDING; - /* Fall through */ + /* fall through */ case SEQ_BLOCK_PADDING: /* @@ -651,7 +651,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->sequence = SEQ_BLOCK_CHECK; - /* Fall through */ + /* fall through */ case SEQ_BLOCK_CHECK: if (s->check_type == XZ_CHECK_CRC32) { @@ -675,7 +675,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->sequence = SEQ_INDEX_PADDING; - /* Fall through */ + /* fall through */ case SEQ_INDEX_PADDING: while ((s->index.size + (b->in_pos - s->in_start)) @@ -699,7 +699,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->sequence = SEQ_INDEX_CRC32; - /* Fall through */ + /* fall through */ case SEQ_INDEX_CRC32: ret = crc32_validate(s, b); @@ -709,7 +709,7 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) s->temp.size = STREAM_HEADER_SIZE; s->sequence = SEQ_STREAM_FOOTER; - /* Fall through */ + /* fall through */ case SEQ_STREAM_FOOTER: if (!fill_temp(s, b)) diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c index 269ee9a796c1a7d2e09be1c194e0f22ec2d06648..db6761ea4deb5949a4c41249ae48145a8ebfc8fb 100644 --- a/lib/zstd/decompress.c +++ b/lib/zstd/decompress.c @@ -442,7 +442,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize case set_repeat: if (dctx->litEntropy == 0) return ERROR(dictionary_corrupted); - /* fall-through */ + /* fall through */ case set_compressed: if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ @@ -2309,7 +2309,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB switch (zds->stage) { case zdss_init: ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */ - /* fall-through */ + /* fall through */ case zdss_loadHeader: { size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize); @@ -2376,7 +2376,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB } zds->stage = zdss_read; } - /* fall through */ + /* fall through */ case zdss_read: { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); @@ -2405,7 +2405,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB zds->stage = zdss_load; /* pass-through */ } - /* fall through */ + /* fall through */ case zdss_load: { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); @@ -2438,7 +2438,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB /* pass-through */ } } - /* fall through */ + /* fall through */ case zdss_flush: { size_t const toFlushSize = zds->outEnd - zds->outStart; diff --git a/mm/gup.c b/mm/gup.c index ae096ea7583fe72bfebfd2378914e04901d1d4a4..e5739a1974d5da8c708bb2ddb35a5852545ab8d2 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -381,22 +381,13 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, } /* - * FOLL_FORCE or a forced COW break 
can write even to unwritable pte's, - * but only after we've gone through a COW cycle and they are dirty. + * FOLL_FORCE can write to even unwritable pte's, but only + * after we've gone through a COW cycle and they are dirty. */ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) { - return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte)); -} - -/* - * A (separate) COW fault might break the page the other way and - * get_user_pages() would return the page from what is now the wrong - * VM. So we need to force a COW break at GUP time even for reads. - */ -static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags) -{ - return is_cow_mapping(vma->vm_flags) && (flags & (FOLL_GET | FOLL_PIN)); + return pte_write(pte) || + ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); } static struct page *follow_page_pte(struct vm_area_struct *vma, @@ -843,7 +834,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, goto unmap; *page = pte_page(*pte); } - if (unlikely(!try_get_page(*page))) { + if (unlikely(!try_grab_page(*page, gup_flags))) { ret = -ENOMEM; goto unmap; } @@ -1067,11 +1058,9 @@ static long __get_user_pages(struct mm_struct *mm, goto out; } if (is_vm_hugetlb_page(vma)) { - if (should_force_cow_break(vma, foll_flags)) - foll_flags |= FOLL_WRITE; i = follow_hugetlb_page(mm, vma, pages, vmas, &start, &nr_pages, i, - foll_flags, locked); + gup_flags, locked); if (locked && *locked == 0) { /* * We've got a VM_FAULT_RETRY @@ -1085,10 +1074,6 @@ static long __get_user_pages(struct mm_struct *mm, continue; } } - - if (should_force_cow_break(vma, foll_flags)) - foll_flags |= FOLL_WRITE; - retry: /* * If we have a pending SIGKILL, don't keep faulting pages and @@ -2689,19 +2674,6 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages, return -EFAULT; /* - * The FAST_GUP case requires FOLL_WRITE even for pure reads, - * because get_user_pages() may need to cause an early COW in - * order to avoid confusing the normal COW routines. So only - * targets that are already writable are safe to do by just - * looking at the page tables. - * - * NOTE! With FOLL_FAST_ONLY we allow read-only gup_fast() here, - * because there is no slow path to fall back on. But you'd - * better be careful about possible COW pages - you'll get _a_ - * COW page, but not necessarily the one you intended to get - * depending on what COW event happens after this. COW may break - * the page copy in a random direction. - * * Disable interrupts. The nested form is used, in order to allow * full, general purpose use of this routine. * @@ -2714,8 +2686,6 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages, */ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && gup_fast_permitted(start, end)) { unsigned long fast_flags = gup_flags; - if (!(gup_flags & FOLL_FAST_ONLY)) - fast_flags |= FOLL_WRITE; local_irq_save(flags); gup_pgd_range(addr, end, fast_flags, pages, &nr_pinned); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 2ccff8472cd4b63078a93ac530fc9a50bb8b0e92..7ff29cc3d55cb52e8f202e68ed3fd10dbe46178c 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1291,12 +1291,13 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) } /* - * FOLL_FORCE or a forced COW break can write even to unwritable pmd's, - * but only after we've gone through a COW cycle and they are dirty. 
+ * FOLL_FORCE can write to even unwritable pmd's, but only + * after we've gone through a COW cycle and they are dirty. */ static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) { - return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd)); + return pmd_write(pmd) || + ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); } struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a301c2d672bf5246f1ec4bf709374b3b78489189..67fc6383995b430c7b6c6ebb0c01dc2128dbe2b4 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1250,21 +1250,32 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nodemask) { unsigned long nr_pages = 1UL << huge_page_order(h); + if (nid == NUMA_NO_NODE) + nid = numa_mem_id(); #ifdef CONFIG_CMA { struct page *page; int node; - for_each_node_mask(node, *nodemask) { - if (!hugetlb_cma[node]) - continue; - - page = cma_alloc(hugetlb_cma[node], nr_pages, - huge_page_order(h), true); + if (hugetlb_cma[nid]) { + page = cma_alloc(hugetlb_cma[nid], nr_pages, + huge_page_order(h), true); if (page) return page; } + + if (!(gfp_mask & __GFP_THISNODE)) { + for_each_node_mask(node, *nodemask) { + if (node == nid || !hugetlb_cma[node]) + continue; + + page = cma_alloc(hugetlb_cma[node], nr_pages, + huge_page_order(h), true); + if (page) + return page; + } + } } #endif @@ -3454,6 +3465,22 @@ static unsigned int allowed_mems_nr(struct hstate *h) } #ifdef CONFIG_SYSCTL +static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, + void *buffer, size_t *length, + loff_t *ppos, unsigned long *out) +{ + struct ctl_table dup_table; + + /* + * In order to avoid races with __do_proc_doulongvec_minmax(), we + * can duplicate the @table and alter the duplicate of it. 
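 * (Aside, illustrative usage assuming the callers below pass a local
 * unsigned long:
 *
 *	unsigned long tmp = h->max_huge_pages;
 *	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer,
 *					     length, ppos, &tmp);
 *
 * only the on-stack duplicate gets its ->data repointed, so concurrent
 * readers of the registered table never observe a transient pointer.)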
+ */ + dup_table = *table; + dup_table.data = out; + + return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); +} + static int hugetlb_sysctl_handler_common(bool obey_mempolicy, struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) @@ -3465,9 +3492,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, if (!hugepages_supported()) return -EOPNOTSUPP; - table->data = &tmp; - table->maxlen = sizeof(unsigned long); - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, + &tmp); if (ret) goto out; @@ -3510,9 +3536,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, if (write && hstate_is_gigantic(h)) return -EINVAL; - table->data = &tmp; - table->maxlen = sizeof(unsigned long); - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, + &tmp); if (ret) goto out; diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index aabf65d4d91bacc9987f1faed41f834368b39b40..1f87aec9ab5c77a1215714d8afd40d6829be6703 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -655,7 +655,7 @@ static void __init __hugetlb_cgroup_file_dfl_init(int idx) snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf); cft->private = MEMFILE_PRIVATE(idx, 0); cft->seq_show = hugetlb_events_show; - cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]), + cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]); cft->flags = CFTYPE_NOT_ON_ROOT; /* Add the events.local file */ @@ -664,7 +664,7 @@ static void __init __hugetlb_cgroup_file_dfl_init(int idx) cft->private = MEMFILE_PRIVATE(idx, 0); cft->seq_show = hugetlb_events_local_show; cft->file_offset = offsetof(struct hugetlb_cgroup, - events_local_file[idx]), + events_local_file[idx]); cft->flags = CFTYPE_NOT_ON_ROOT; /* NULL terminate the last cft */ diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 15a9af791014c5602d9939606503c174837da08d..cfa0dba5fd3bc3b6b252acad3720d403b8ceea71 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -466,7 +466,7 @@ int __khugepaged_enter(struct mm_struct *mm) return -ENOMEM; /* __khugepaged_exit() must not run from under us */ - VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); + VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm); if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { free_mm_slot(mm_slot); return 0; @@ -1709,7 +1709,7 @@ static void collapse_file(struct mm_struct *mm, xas_unlock_irq(&xas); page_cache_sync_readahead(mapping, &file->f_ra, file, index, - PAGE_SIZE); + end - index); /* drain pagevecs to help isolate_lru_page() */ lru_add_drain(); page = find_lock_page(mapping, index); diff --git a/mm/ksm.c b/mm/ksm.c index 0aa2247bddd76dbee9099f2df7f46542f7d6e7e4..235f55d015410df6805de764ad9466dc83ec0dc4 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2453,6 +2453,10 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, if (vma_is_dax(vma)) return 0; +#ifdef VM_SAO + if (*vm_flags & VM_SAO) + return 0; +#endif #ifdef VM_SPARC_ADI if (*vm_flags & VM_SPARC_ADI) return 0; @@ -2657,31 +2661,6 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) goto again; } -bool reuse_ksm_page(struct page *page, - struct vm_area_struct *vma, - unsigned long address) -{ -#ifdef CONFIG_DEBUG_VM - if (WARN_ON(is_zero_pfn(page_to_pfn(page))) || - WARN_ON(!page_mapped(page)) || - WARN_ON(!PageLocked(page))) { - dump_page(page, "reuse_ksm_page"); 
- return false; - } -#endif - - if (PageSwapCache(page) || !page_stable_node(page)) - return false; - /* Prohibit parallel get_ksm_page() */ - if (!page_ref_freeze(page, 1)) - return false; - - page_move_anon_rmap(page, vma); - page->index = linear_page_index(vma, address); - page_ref_unfreeze(page, 1); - - return true; -} #ifdef CONFIG_MIGRATION void ksm_migrate_page(struct page *newpage, struct page *oldpage) { diff --git a/mm/madvise.c b/mm/madvise.c index dd1d43cf026deab477489aecc92c33dba12f89c7..d4aa5f7765435ee912c5cb758e310228289bcda3 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -289,9 +289,9 @@ static long madvise_willneed(struct vm_area_struct *vma, */ *prev = NULL; /* tell sys_madvise we drop mmap_lock */ get_file(file); - mmap_read_unlock(current->mm); offset = (loff_t)(start - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); + mmap_read_unlock(current->mm); vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED); fput(file); mmap_read_lock(current->mm); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b807952b4d431b60c52e2fef59a093ed6cc6f373..cfa6cbad21d55c977e9c6983d6eab18fc7a693bf 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6774,6 +6774,9 @@ static void uncharge_batch(const struct uncharge_gather *ug) __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages); memcg_check_events(ug->memcg, ug->dummy_page); local_irq_restore(flags); + + /* drop reference from uncharge_page */ + css_put(&ug->memcg->css); } static void uncharge_page(struct page *page, struct uncharge_gather *ug) @@ -6797,6 +6800,9 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug) uncharge_gather_clear(ug); } ug->memcg = page->mem_cgroup; + + /* pairs with css_put in uncharge_batch */ + css_get(&ug->memcg->css); } nr_pages = compound_nr(page); diff --git a/mm/memory.c b/mm/memory.c index 3a7779d9891d956a96b7a3dc44495fd5f16040da..469af373ae76e16c5879687e3db1964c29491169 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -73,6 +73,7 @@ #include #include #include +#include #include @@ -83,6 +84,7 @@ #include #include +#include "pgalloc-track.h" #include "internal.h" #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST) @@ -2206,7 +2208,8 @@ EXPORT_SYMBOL(vm_iomap_memory); static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, - pte_fn_t fn, void *data, bool create) + pte_fn_t fn, void *data, bool create, + pgtbl_mod_mask *mask) { pte_t *pte; int err = 0; @@ -2214,7 +2217,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, if (create) { pte = (mm == &init_mm) ? 
- pte_alloc_kernel(pmd, addr) : + pte_alloc_kernel_track(pmd, addr, mask) : pte_alloc_map_lock(mm, pmd, addr, &ptl); if (!pte) return -ENOMEM; @@ -2235,6 +2238,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, break; } } while (addr += PAGE_SIZE, addr != end); + *mask |= PGTBL_PTE_MODIFIED; arch_leave_lazy_mmu_mode(); @@ -2245,7 +2249,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, - pte_fn_t fn, void *data, bool create) + pte_fn_t fn, void *data, bool create, + pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; @@ -2254,7 +2259,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, BUG_ON(pud_huge(*pud)); if (create) { - pmd = pmd_alloc(mm, pud, addr); + pmd = pmd_alloc_track(mm, pud, addr, mask); if (!pmd) return -ENOMEM; } else { @@ -2264,7 +2269,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, next = pmd_addr_end(addr, end); if (create || !pmd_none_or_clear_bad(pmd)) { err = apply_to_pte_range(mm, pmd, addr, next, fn, data, - create); + create, mask); if (err) break; } @@ -2274,14 +2279,15 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, unsigned long addr, unsigned long end, - pte_fn_t fn, void *data, bool create) + pte_fn_t fn, void *data, bool create, + pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; int err = 0; if (create) { - pud = pud_alloc(mm, p4d, addr); + pud = pud_alloc_track(mm, p4d, addr, mask); if (!pud) return -ENOMEM; } else { @@ -2291,7 +2297,7 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, next = pud_addr_end(addr, end); if (create || !pud_none_or_clear_bad(pud)) { err = apply_to_pmd_range(mm, pud, addr, next, fn, data, - create); + create, mask); if (err) break; } @@ -2301,14 +2307,15 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, - pte_fn_t fn, void *data, bool create) + pte_fn_t fn, void *data, bool create, + pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; int err = 0; if (create) { - p4d = p4d_alloc(mm, pgd, addr); + p4d = p4d_alloc_track(mm, pgd, addr, mask); if (!p4d) return -ENOMEM; } else { @@ -2318,7 +2325,7 @@ static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, next = p4d_addr_end(addr, end); if (create || !p4d_none_or_clear_bad(p4d)) { err = apply_to_pud_range(mm, p4d, addr, next, fn, data, - create); + create, mask); if (err) break; } @@ -2331,8 +2338,9 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, void *data, bool create) { pgd_t *pgd; - unsigned long next; + unsigned long start = addr, next; unsigned long end = addr + size; + pgtbl_mod_mask mask = 0; int err = 0; if (WARN_ON(addr >= end)) @@ -2343,11 +2351,14 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, next = pgd_addr_end(addr, end); if (!create && pgd_none_or_clear_bad(pgd)) continue; - err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create); + err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask); if (err) break; } while (pgd++, addr = next, addr != end); + if (mask & ARCH_PAGE_TABLE_SYNC_MASK) + arch_sync_kernel_mappings(start, start + size); + return err; } @@ -2622,6 +2633,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf) if (ptep_set_access_flags(vma, vmf->address, 
vmf->pte, entry, 1)) update_mmu_cache(vma, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); + count_vm_event(PGREUSE); } /* @@ -2927,50 +2939,25 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) * not dirty accountable. */ if (PageAnon(vmf->page)) { - int total_map_swapcount; - if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) || - page_count(vmf->page) != 1)) + struct page *page = vmf->page; + + /* PageKsm() doesn't necessarily raise the page refcount */ + if (PageKsm(page) || page_count(page) != 1) + goto copy; + if (!trylock_page(page)) + goto copy; + if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) { + unlock_page(page); goto copy; - if (!trylock_page(vmf->page)) { - get_page(vmf->page); - pte_unmap_unlock(vmf->pte, vmf->ptl); - lock_page(vmf->page); - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, - vmf->address, &vmf->ptl); - if (!pte_same(*vmf->pte, vmf->orig_pte)) { - update_mmu_tlb(vma, vmf->address, vmf->pte); - unlock_page(vmf->page); - pte_unmap_unlock(vmf->pte, vmf->ptl); - put_page(vmf->page); - return 0; - } - put_page(vmf->page); - } - if (PageKsm(vmf->page)) { - bool reused = reuse_ksm_page(vmf->page, vmf->vma, - vmf->address); - unlock_page(vmf->page); - if (!reused) - goto copy; - wp_page_reuse(vmf); - return VM_FAULT_WRITE; - } - if (reuse_swap_page(vmf->page, &total_map_swapcount)) { - if (total_map_swapcount == 1) { - /* - * The page is all ours. Move it to - * our anon_vma so the rmap code will - * not search our parent or siblings. - * Protected against the rmap code by - * the page lock. - */ - page_move_anon_rmap(vmf->page, vma); - } - unlock_page(vmf->page); - wp_page_reuse(vmf); - return VM_FAULT_WRITE; } - unlock_page(vmf->page); + /* + * Ok, we've got the only map reference, and the only + * page count reference, and the page is locked, + * it's dark out, and we're wearing sunglasses. Hit it. + */ + wp_page_reuse(vmf); + unlock_page(page); + return VM_FAULT_WRITE; } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { return wp_page_shared(vmf); @@ -4247,6 +4234,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) vmf->flags & FAULT_FLAG_WRITE)) { update_mmu_cache(vmf->vma, vmf->address, vmf->pte); } else { + /* Skip spurious TLB flush for retried page fault */ + if (vmf->flags & FAULT_FLAG_TRIED) + goto unlock; /* * This is needed only for protection faults but the arch code * is not yet telling us if this is a protection fault or not. 
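/*
 * (Aside on the do_wp_page() rewrite above -- condensed sketch of the
 * new reuse rule, error paths elided: an anonymous page is reused in
 * place only when we demonstrably hold the sole reference, and the
 * check is repeated under the page lock to close the race window:
 *
 *	if (PageKsm(page) || page_count(page) != 1)
 *		goto copy;			// others hold references
 *	if (!trylock_page(page))
 *		goto copy;			// never wait, just copy
 *	if (PageKsm(page) || page_mapcount(page) != 1 ||
 *	    page_count(page) != 1) {
 *		unlock_page(page);		// lost the race
 *		goto copy;
 *	}
 *	wp_page_reuse(vmf);			// sole owner: reuse
 *	unlock_page(page);
 *	return VM_FAULT_WRITE;
 *
 * Every uncertain case falls back to copying, trading an occasional
 * extra copy for a much simpler and safer reuse test.)
 */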
diff --git a/mm/memremap.c b/mm/memremap.c index 03e38b7a38f1a434d5f63294c6675672d9eed227..006dace60b1a9d8e331f34c24dcea9f96036cdb9 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -216,7 +216,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) return ERR_PTR(-EINVAL); } break; - case MEMORY_DEVICE_DEVDAX: + case MEMORY_DEVICE_GENERIC: need_devmap_managed = false; break; case MEMORY_DEVICE_PCI_P2PDMA: diff --git a/mm/migrate.c b/mm/migrate.c index 34a842a8eb6a7b85b191e58adca66002a1a5072c..941b89383cf3dec58a2fcfa7cca09559ddb4fc04 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -246,13 +246,13 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, else if (pte_swp_uffd_wp(*pvmw.pte)) pte = pte_mkuffd_wp(pte); - if (unlikely(is_zone_device_page(new))) { - if (is_device_private_page(new)) { - entry = make_device_private_entry(new, pte_write(pte)); - pte = swp_entry_to_pte(entry); - if (pte_swp_uffd_wp(*pvmw.pte)) - pte = pte_mkuffd_wp(pte); - } + if (unlikely(is_device_private_page(new))) { + entry = make_device_private_entry(new, pte_write(pte)); + pte = swp_entry_to_pte(entry); + if (pte_swp_soft_dirty(*pvmw.pte)) + pte = pte_swp_mksoft_dirty(pte); + if (pte_swp_uffd_wp(*pvmw.pte)) + pte = pte_swp_mkuffd_wp(pte); } #ifdef CONFIG_HUGETLB_PAGE @@ -2427,10 +2427,17 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, entry = make_migration_entry(page, mpfn & MIGRATE_PFN_WRITE); swp_pte = swp_entry_to_pte(entry); - if (pte_soft_dirty(pte)) - swp_pte = pte_swp_mksoft_dirty(swp_pte); - if (pte_uffd_wp(pte)) - swp_pte = pte_swp_mkuffd_wp(swp_pte); + if (pte_present(pte)) { + if (pte_soft_dirty(pte)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_uffd_wp(pte)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); + } else { + if (pte_swp_soft_dirty(pte)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_swp_uffd_wp(pte)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); + } set_pte_at(mm, addr, ptep, swp_pte); /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0e2bab486fea6f4d2a6f79c13255d33e097dcb9e..fab5e97dc9ca5b10b09984fdde6c90a6bc4daff0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1302,6 +1302,11 @@ static void free_pcppages_bulk(struct zone *zone, int count, struct page *page, *tmp; LIST_HEAD(head); + /* + * Ensure a proper count is passed, which otherwise would get stuck + * in the while (list_empty(list)) loop below. + */ + count = min(pcp->count, count); while (count) { struct list_head *list; @@ -7888,7 +7893,7 @@ int __meminit init_per_zone_wmark_min(void) return 0; } -core_initcall(init_per_zone_wmark_min) +postcore_initcall(init_per_zone_wmark_min) /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so diff --git a/mm/rmap.c b/mm/rmap.c index 83cc459edc407fc26392df777bebd15ce5df419b..9425260774a1f0d034d6a3d434ff153b6e7e8440 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1511,9 +1511,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, */ entry = make_migration_entry(page, 0); swp_pte = swp_entry_to_pte(entry); - if (pte_soft_dirty(pteval)) + + /* + * pteval maps a zone device page and is therefore + * a swap pte. 
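 * (Aside: the soft-dirty and uffd-wp bits occupy different positions in
 * present ptes and in swap ptes, so calling pte_soft_dirty() or
 * pte_uffd_wp() on a swap pte tests the wrong bits; the pte_swp_*()
 * accessors below -- and the pte_present() split in the mm/migrate.c
 * hunk above -- pick the correct encoding for each case.)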
+ */ + if (pte_swp_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); - if (pte_uffd_wp(pteval)) + if (pte_swp_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); /* diff --git a/mm/rodata_test.c b/mm/rodata_test.c index 2a99df7beeb3504115053b2a872f4a80b24c6bd6..2613371945b7ec6f8f1ce24ad378ba539f4d9bc0 100644 --- a/mm/rodata_test.c +++ b/mm/rodata_test.c @@ -7,6 +7,7 @@ */ #define pr_fmt(fmt) "rodata_test: " fmt +#include #include #include diff --git a/mm/slub.c b/mm/slub.c index 68c02b2eecd90334588493cd0b28058c15c4c11d..d4177aecedf6bb7cfd45b74b61b8ce2243eebc68 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -672,12 +672,12 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...) } static bool freelist_corrupted(struct kmem_cache *s, struct page *page, - void *freelist, void *nextfree) + void **freelist, void *nextfree) { if ((s->flags & SLAB_CONSISTENCY_CHECKS) && - !check_valid_pointer(s, page, nextfree)) { - object_err(s, page, freelist, "Freechain corrupt"); - freelist = NULL; + !check_valid_pointer(s, page, nextfree) && freelist) { + object_err(s, page, *freelist, "Freechain corrupt"); + *freelist = NULL; slab_fix(s, "Isolate corrupted freechain"); return true; } @@ -1494,7 +1494,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) {} static bool freelist_corrupted(struct kmem_cache *s, struct page *page, - void *freelist, void *nextfree) + void **freelist, void *nextfree) { return false; } @@ -2184,7 +2184,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, * 'freelist' is already corrupted. So isolate all objects * starting at 'freelist'. */ - if (freelist_corrupted(s, page, freelist, nextfree)) + if (freelist_corrupted(s, page, &freelist, nextfree)) break; do { diff --git a/mm/vmalloc.c b/mm/vmalloc.c index b482d240f9a250c82d2b15347fedda3ef6c55cc9..be4724b916b3e782a60d5dfa567335676072373c 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -104,6 +104,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, if (pmd_none_or_clear_bad(pmd)) continue; vunmap_pte_range(pmd, addr, next, mask); + + cond_resched(); } while (pmd++, addr = next, addr != end); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 99e1796eb8336236d4eacefd2e51ca00d76786b8..9727dd8e2581b2f1f1d42ce9d55cea728b40db68 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2615,6 +2615,14 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) unsigned long reclaimed; unsigned long scanned; + /* + * This loop can become CPU-bound when target memcgs + * aren't eligible for reclaim - either because they + * don't have any reclaimable pages, or because their + * memory is explicitly protected. Avoid soft lockups. 
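 * (Aside, shape of the resulting loop -- one cond_resched() per
 * iteration of the memcg tree walk:
 *
 *	memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
 *	do {
 *		cond_resched();
 *		...
 *	} while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
 * )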
+ */ + cond_resched(); + mem_cgroup_calculate_protection(target_memcg, memcg); if (mem_cgroup_below_min(memcg)) { diff --git a/mm/vmstat.c b/mm/vmstat.c index e670f910cd2f3ba86ae0d75ba4a0f85b7ab907f3..4f7b4ee6aa123cdf10138e7500ea98edcd98f6de 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1241,6 +1241,7 @@ const char * const vmstat_text[] = { "pglazyfreed", "pgrefill", + "pgreuse", "pgsteal_kswapd", "pgsteal_direct", "pgscan_kswapd", diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 3dd7c972677be27b276c44942419a33a79b13214..ec8408d1638fbdf2742a3c07eaeb8ac0511883f5 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -367,7 +367,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCSHWTSTAMP: if (!net_eq(dev_net(dev), &init_net)) break; - /* fall through */ + fallthrough; case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 3debad93be1a602ca65d523e3d0ac4df1e88b85b..bc8807d9281fd39130ac997ec5eead26e6e4feb4 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -520,7 +520,7 @@ static void xen_9pfs_front_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* fall through - Missed the backend's CLOSING state */ + fallthrough; /* Missed the backend's CLOSING state */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; diff --git a/net/atm/common.c b/net/atm/common.c index 84367b844b147376a7718fbdc51b2140111c5de5..1cfa9bf1d18713afc7191ddecb256bfb3844154e 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -297,7 +297,7 @@ static int adjust_tp(struct atm_trafprm *tp, unsigned char aal) break; default: pr_warn("AAL problems ... (%d)\n", aal); - /* fall through */ + fallthrough; case ATM_AAL5: max_sdu = ATM_MAX_AAL5_PDU; } @@ -417,7 +417,7 @@ static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi, case ATM_NO_AAL: /* ATM_AAL5 is also used in the "0 for default" case */ vcc->qos.aal = ATM_AAL5; - /* fall through */ + fallthrough; case ATM_AAL5: error = atm_init_aal5(vcc); vcc->stats = &dev->stats.aal5; diff --git a/net/atm/lec.c b/net/atm/lec.c index 875fc0bc17800c9bb6528cf6b828e4467d43d73e..b570ef919c28f057fc7dc54c4164b89d572c64b5 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -380,7 +380,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) if (mesg->content.normal.no_source_le_narp) break; - /* FALL THROUGH */ + fallthrough; case l_arp_update: lec_arp_update(priv, mesg->content.normal.mac_addr, mesg->content.normal.atm_addr, diff --git a/net/atm/resources.c b/net/atm/resources.c index 94bdc6527ee8a4341bbd3129d26bd9404422c41e..53236986dfe09c377c85f98d176ee7cde0c4bd9a 100644 --- a/net/atm/resources.c +++ b/net/atm/resources.c @@ -266,7 +266,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len, goto done; } } - /* fall through */ + fallthrough; case ATM_SETESIF: { unsigned char esi[ESI_LEN]; @@ -288,7 +288,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len, error = -EPERM; goto done; } - /* fall through */ + fallthrough; case ATM_GETSTAT: size = sizeof(struct atm_dev_stats); error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ); @@ -361,7 +361,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len, error = -EINVAL; goto done; } - /* fall through */ + fallthrough; case ATM_SETCIRANGE: case SONET_GETSTATZ: case SONET_SETDIAG: @@ -371,7 +371,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user 
*buf, int __user *sioc_len, error = -EPERM; goto done; } - /* fall through */ + fallthrough; default: if (IS_ENABLED(CONFIG_COMPAT) && compat) { #ifdef CONFIG_COMPAT diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 0f8495b9eeb1263c1784514f3b8616a724f2662f..717fe657561d909efd69297851cdbf4c0b6e36b2 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -881,6 +881,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl, ogm_packet->version, ntohs(ogm_packet->tvlv_len)); + if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) { + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "Drop packet: originator packet from ourself\n"); + return; + } + /* If the throughput metric is 0, immediately drop the packet. No need * to create orig_node / neigh_node for an unusable route. */ @@ -1008,11 +1014,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; - ogm_packet = (struct batadv_ogm2_packet *)skb->data; - - if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) - goto free_skb; - batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES, skb->len + ETH_HLEN); diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 91a04ca373dc105a1821449c688d570ba72059ce..8500f56cbd10cb006e0f3872cc4650c64f6e02e3 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -437,7 +437,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, skb->len + ETH_HLEN); - netif_rx(skb); + if (in_interrupt()) + netif_rx(skb); + else + netif_rx_ni(skb); out: if (primary_if) batadv_hardif_put(primary_if); diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index a18dcc686dc315c621ed0da23dbed218ba1843c5..ef3f85b576c4c7cc6b0a7d2c2c3e987b53218291 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -703,8 +703,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET; /* store the client address if the message is going to a client */ - if (ret == BATADV_DHCP_TO_CLIENT && - pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) { + if (ret == BATADV_DHCP_TO_CLIENT) { + if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) + return BATADV_DHCP_NO; + /* check if the DHCP packet carries an Ethernet DHCP */ p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET; if (*p != BATADV_DHCP_HTYPE_ETHERNET) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 99eb8c6c0fbccee91de7f3853c470301fe26263c..a66f211726e7c98cce2da4a422331c48e1508f82 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -425,7 +425,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: is_l2 = true; - /* fall through */ + fallthrough; case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: case BPF_PROG_TYPE_LWT_XMIT: diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 1641f414d1ba670966c81506e23cebabe9b09774..ebe33b60efd6b064d374b12439cf7e81abddcf50 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -2238,6 +2238,10 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, struct ebt_table *t; struct 
net *net = sock_net(sk); + if ((cmd == EBT_SO_GET_INFO || cmd == EBT_SO_GET_INIT_INFO) && + *len != sizeof(struct compat_ebt_replace)) + return -EINVAL; + if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c index 8096732223828a7110ab35d715fe1894ddf57a91..8d033a75a766ef529e201bc04060c2b14e7709cb 100644 --- a/net/bridge/netfilter/nf_conntrack_bridge.c +++ b/net/bridge/netfilter/nf_conntrack_bridge.c @@ -168,6 +168,7 @@ static unsigned int nf_ct_br_defrag4(struct sk_buff *skb, static unsigned int nf_ct_br_defrag6(struct sk_buff *skb, const struct nf_hook_state *state) { +#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) u16 zone_id = NF_CT_DEFAULT_ZONE_ID; enum ip_conntrack_info ctinfo; struct br_input_skb_cb cb; @@ -180,14 +181,17 @@ static unsigned int nf_ct_br_defrag6(struct sk_buff *skb, br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm)); - err = nf_ipv6_br_defrag(state->net, skb, - IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id); + err = nf_ct_frag6_gather(state->net, skb, + IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id); /* queued */ if (err == -EINPROGRESS) return NF_STOLEN; br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size); return err == 0 ? NF_ACCEPT : NF_DROP; +#else + return NF_ACCEPT; +#endif } static int nf_ct_br_ip_check(const struct sk_buff *skb) diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c index ce2767e9cec6582dcb970270ce91be0c6e9799ad..7b0af33bdb97fd6ef1c15c6028993ee942b5806c 100644 --- a/net/caif/cfrfml.c +++ b/net/caif/cfrfml.c @@ -116,7 +116,7 @@ static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt) if (segmented) { if (rfml->incomplete_frm == NULL) { /* Initial Segment */ - if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0) + if (cfpkt_peek_head(pkt, rfml->seghead, 6) != 0) goto out; rfml->pdu_size = get_unaligned_le16(rfml->seghead+4); @@ -233,7 +233,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt) if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE) err = cfpkt_peek_head(pkt, head, 6); - if (err < 0) + if (err != 0) goto out; while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) { diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index 78ff9b3f1d40c732ba39b2402b5099ba84f8a4a5..1be4c898b2fa884b96c3363a1dca09279f541e89 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -398,6 +398,7 @@ static int j1939_sk_init(struct sock *sk) spin_lock_init(&jsk->sk_session_queue_lock); INIT_LIST_HEAD(&jsk->sk_session_queue); sk->sk_destruct = j1939_sk_sock_destruct; + sk->sk_protocol = CAN_J1939; return 0; } @@ -466,6 +467,14 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) goto out_release_sock; } + if (!ndev->ml_priv) { + netdev_warn_once(ndev, + "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n"); + dev_put(ndev); + ret = -ENODEV; + goto out_release_sock; + } + priv = j1939_netdev_start(ndev); dev_put(ndev); if (IS_ERR(priv)) { @@ -553,6 +562,11 @@ static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr, static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr, const struct j1939_sock *jsk, int peer) { + /* There are two holes (2 bytes and 3 bytes) to clear to avoid + * leaking kernel information to user space. 
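 * (Aside: the holes are ordinary C struct padding. With a layout like
 *
 *	struct sockaddr_can {
 *		__kernel_sa_family_t can_family;	// 2 bytes
 *							// 2-byte hole
 *		int can_ifindex;
 *		union { ... } can_addr;			// 3-byte tail padding
 *	};
 *
 * member-by-member assignment never writes the padding bytes, so the
 * struct must be zeroed as a whole before being copied to user space.)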
+ */ + memset(addr, 0, J1939_MIN_NAMELEN); + addr->can_family = AF_CAN; addr->can_ifindex = jsk->ifindex; addr->can_addr.j1939.pgn = jsk->addr.pgn; @@ -1072,7 +1086,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk, break; case -ERESTARTSYS: ret = -EINTR; - /* fall through */ + fallthrough; case -EAGAIN: /* OK */ if (todo_size != size) ret = size - todo_size; diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c index 9f99af5b0b11e74b7718be8fa3300e01fc36a4dc..0cec4152f9797c51b1cf93c2e9efb91d9a71239d 100644 --- a/net/can/j1939/transport.c +++ b/net/can/j1939/transport.c @@ -352,17 +352,16 @@ void j1939_session_skb_queue(struct j1939_session *session, skb_queue_tail(&session->skb_queue, skb); } -static struct sk_buff *j1939_session_skb_find(struct j1939_session *session) +static struct +sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session, + unsigned int offset_start) { struct j1939_priv *priv = session->priv; + struct j1939_sk_buff_cb *do_skcb; struct sk_buff *skb = NULL; struct sk_buff *do_skb; - struct j1939_sk_buff_cb *do_skcb; - unsigned int offset_start; unsigned long flags; - offset_start = session->pkt.dpo * 7; - spin_lock_irqsave(&session->skb_queue.lock, flags); skb_queue_walk(&session->skb_queue, do_skb) { do_skcb = j1939_skb_to_cb(do_skb); @@ -382,6 +381,14 @@ static struct sk_buff *j1939_session_skb_find(struct j1939_session *session) return skb; } +static struct sk_buff *j1939_session_skb_find(struct j1939_session *session) +{ + unsigned int offset_start; + + offset_start = session->pkt.dpo * 7; + return j1939_session_skb_find_by_offset(session, offset_start); +} + /* see if we are receiver * returns 0 for broadcasts, although we will receive them */ @@ -716,10 +723,12 @@ static int j1939_session_tx_rts(struct j1939_session *session) return ret; session->last_txcmd = dat[0]; - if (dat[0] == J1939_TP_CMD_BAM) + if (dat[0] == J1939_TP_CMD_BAM) { j1939_tp_schedule_txtimer(session, 50); - - j1939_tp_set_rxtimeout(session, 1250); + j1939_tp_set_rxtimeout(session, 250); + } else { + j1939_tp_set_rxtimeout(session, 1250); + } netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); @@ -766,7 +775,7 @@ static int j1939_session_tx_dat(struct j1939_session *session) int ret = 0; u8 dat[8]; - se_skb = j1939_session_skb_find(session); + se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7); if (!se_skb) return -ENOBUFS; @@ -787,6 +796,18 @@ static int j1939_session_tx_dat(struct j1939_session *session) if (len > 7) len = 7; + if (offset + len > se_skb->len) { + netdev_err_once(priv->ndev, + "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n", + __func__, session, skcb->offset, se_skb->len , session->pkt.tx); + return -EOVERFLOW; + } + + if (!len) { + ret = -ENOBUFS; + break; + } + memcpy(&dat[1], &tpdat[offset], len); ret = j1939_tp_tx_dat(session, dat, len + 1); if (ret < 0) { @@ -839,7 +860,7 @@ static int j1939_xtp_txnext_transmiter(struct j1939_session *session) return ret; } - /* fall through */ + fallthrough; case J1939_TP_CMD_CTS: case 0xff: /* did some data */ case J1939_ETP_CMD_DPO: @@ -1055,9 +1076,9 @@ static void __j1939_session_cancel(struct j1939_session *session, lockdep_assert_held(&session->priv->active_session_list_lock); session->err = j1939_xtp_abort_to_errno(priv, err); + session->state = J1939_SESSION_WAITING_ABORT; /* do not send aborts on incoming broadcasts */ if (!j1939_cb_is_broadcast(&session->skcb)) { - session->state = 
J1939_SESSION_WAITING_ABORT; j1939_xtp_tx_abort(priv, &session->skcb, !session->transmission, err, session->skcb.addr.pgn); @@ -1120,6 +1141,9 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer) * cleanup including propagation of the error to user space. */ break; + case -EOVERFLOW: + j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG); + break; case 0: session->tx_retry = 0; break; @@ -1651,8 +1675,12 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb, return; } session = j1939_xtp_rx_rts_session_new(priv, skb); - if (!session) + if (!session) { + if (cmd == J1939_TP_CMD_BAM && j1939_sk_recv_match(priv, skcb)) + netdev_info(priv->ndev, "%s: failed to create TP BAM session\n", + __func__); return; + } } else { if (j1939_xtp_rx_rts_session_active(session, skb)) { j1939_session_put(session); @@ -1661,11 +1689,15 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb, } session->last_cmd = cmd; - j1939_tp_set_rxtimeout(session, 1250); - - if (cmd != J1939_TP_CMD_BAM && !session->transmission) { - j1939_session_txtimer_cancel(session); - j1939_tp_schedule_txtimer(session, 0); + if (cmd == J1939_TP_CMD_BAM) { + if (!session->transmission) + j1939_tp_set_rxtimeout(session, 750); + } else { + if (!session->transmission) { + j1939_session_txtimer_cancel(session); + j1939_tp_schedule_txtimer(session, 0); + } + j1939_tp_set_rxtimeout(session, 1250); } j1939_session_put(session); @@ -1716,6 +1748,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, int offset; int nbytes; bool final = false; + bool remain = false; bool do_cts_eoma = false; int packet; @@ -1731,12 +1764,12 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, case J1939_ETP_CMD_DPO: if (skcb->addr.type == J1939_ETP) break; - /* fall through */ - case J1939_TP_CMD_BAM: /* fall through */ + fallthrough; + case J1939_TP_CMD_BAM: case J1939_TP_CMD_CTS: /* fall through */ if (skcb->addr.type != J1939_ETP) break; - /* fall through */ + fallthrough; default: netdev_info(priv->ndev, "%s: 0x%p: last %02x\n", __func__, session, session->last_cmd); @@ -1750,7 +1783,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, __func__, session); goto out_session_cancel; } - se_skb = j1939_session_skb_find(session); + + se_skb = j1939_session_skb_find_by_offset(session, packet * 7); if (!se_skb) { netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__, session); @@ -1769,7 +1803,20 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, } tpdat = se_skb->data; - memcpy(&tpdat[offset], &dat[1], nbytes); + if (!session->transmission) { + memcpy(&tpdat[offset], &dat[1], nbytes); + } else { + int err; + + err = memcmp(&tpdat[offset], &dat[1], nbytes); + if (err) + netdev_err_once(priv->ndev, + "%s: 0x%p: Data of RX-looped back packet (%*ph) doesn't match TX data (%*ph)!\n", + __func__, session, + nbytes, &dat[1], + nbytes, &tpdat[offset]); + } + if (packet == session->pkt.rx) session->pkt.rx++; @@ -1777,6 +1824,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, j1939_cb_is_broadcast(&session->skcb)) { if (session->pkt.rx >= session->pkt.total) final = true; + else + remain = true; } else { /* never final, an EOMA must follow */ if (session->pkt.rx >= session->pkt.last) @@ -1784,7 +1833,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session, } if (final) { + j1939_session_timers_cancel(session); j1939_session_completed(session); + } else if (remain) { + if (!session->transmission) + 
j1939_tp_set_rxtimeout(session, 750); } else if (do_cts_eoma) { j1939_tp_set_rxtimeout(session, 1250); if (!session->transmission) @@ -1829,6 +1882,13 @@ static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb) else j1939_xtp_rx_dat_one(session, skb); } + + if (j1939_cb_is_broadcast(skcb)) { + session = j1939_session_get_by_addr(priv, &skcb->addr, false, + false); + if (session) + j1939_xtp_rx_dat_one(session, skb); + } } /* j1939 main intf */ @@ -1905,8 +1965,8 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb) switch (cmd) { case J1939_ETP_CMD_RTS: extd = J1939_ETP; - /* fall through */ - case J1939_TP_CMD_BAM: /* fall through */ + fallthrough; + case J1939_TP_CMD_BAM: case J1939_TP_CMD_RTS: /* fall through */ if (skcb->addr.type != extd) return; @@ -1920,14 +1980,14 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb) if (j1939_tp_im_transmitter(skcb)) j1939_xtp_rx_rts(priv, skb, true); - if (j1939_tp_im_receiver(skcb)) + if (j1939_tp_im_receiver(skcb) || j1939_cb_is_broadcast(skcb)) j1939_xtp_rx_rts(priv, skb, false); break; case J1939_ETP_CMD_CTS: extd = J1939_ETP; - /* fall through */ + fallthrough; case J1939_TP_CMD_CTS: if (skcb->addr.type != extd) return; @@ -1954,7 +2014,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb) case J1939_ETP_CMD_EOMA: extd = J1939_ETP; - /* fall through */ + fallthrough; case J1939_TP_CMD_EOMA: if (skcb->addr.type != extd) return; @@ -1984,20 +2044,20 @@ int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb) { struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); - if (!j1939_tp_im_involved_anydir(skcb)) + if (!j1939_tp_im_involved_anydir(skcb) && !j1939_cb_is_broadcast(skcb)) return 0; switch (skcb->addr.pgn) { case J1939_ETP_PGN_DAT: skcb->addr.type = J1939_ETP; - /* fall through */ + fallthrough; case J1939_TP_PGN_DAT: j1939_xtp_rx_dat(priv, skb); break; case J1939_ETP_PGN_CTL: skcb->addr.type = J1939_ETP; - /* fall through */ + fallthrough; case J1939_TP_PGN_CTL: if (skb->len < 8) return 0; /* Don't care. 
Nothing to extract here */ @@ -2017,6 +2077,10 @@ void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb) if (!skb->sk) return; + if (skb->sk->sk_family != AF_CAN || + skb->sk->sk_protocol != CAN_J1939) + return; + j1939_session_list_lock(priv); session = j1939_session_get_simple(priv, skb); j1939_session_list_unlock(priv); diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c index 81e1e006c5404ddb3d18c78d4bb89850cde198e1..16a47c0eef374dbe6efbd2bb20f518c25a038761 100644 --- a/net/ceph/ceph_hash.c +++ b/net/ceph/ceph_hash.c @@ -50,35 +50,35 @@ unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length) switch (len) { case 11: c = c + ((__u32)k[10] << 24); - /* fall through */ + fallthrough; case 10: c = c + ((__u32)k[9] << 16); - /* fall through */ + fallthrough; case 9: c = c + ((__u32)k[8] << 8); /* the first byte of c is reserved for the length */ - /* fall through */ + fallthrough; case 8: b = b + ((__u32)k[7] << 24); - /* fall through */ + fallthrough; case 7: b = b + ((__u32)k[6] << 16); - /* fall through */ + fallthrough; case 6: b = b + ((__u32)k[5] << 8); - /* fall through */ + fallthrough; case 5: b = b + k[4]; - /* fall through */ + fallthrough; case 4: a = a + ((__u32)k[3] << 24); - /* fall through */ + fallthrough; case 3: a = a + ((__u32)k[2] << 16); - /* fall through */ + fallthrough; case 2: a = a + ((__u32)k[1] << 8); - /* fall through */ + fallthrough; case 1: a = a + k[0]; /* case 0: nothing left to add */ diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index 07e5614eb3f161457ac5120c668330734d47031f..7057f8db4f99a1b2092c649a5d55e6375b608077 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -987,7 +987,7 @@ int crush_do_rule(const struct crush_map *map, case CRUSH_RULE_CHOOSELEAF_FIRSTN: case CRUSH_RULE_CHOOSE_FIRSTN: firstn = 1; - /* fall through */ + fallthrough; case CRUSH_RULE_CHOOSELEAF_INDEP: case CRUSH_RULE_CHOOSE_INDEP: if (wsize == 0) diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 27d6ab11f9ee87a756ee5b6dc23247e4204156c1..bdfd66ba3843146ca2e45fbc5a0e4affcc203758 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -412,7 +412,7 @@ static void ceph_sock_state_change(struct sock *sk) switch (sk->sk_state) { case TCP_CLOSE: dout("%s TCP_CLOSE\n", __func__); - /* fall through */ + fallthrough; case TCP_CLOSE_WAIT: dout("%s TCP_CLOSE_WAIT\n", __func__); con_sock_state_closing(con); @@ -2751,7 +2751,7 @@ static int try_read(struct ceph_connection *con) switch (ret) { case -EBADMSG: con->error_msg = "bad crc/signature"; - /* fall through */ + fallthrough; case -EBADE: ret = -EIO; break; diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 3d8c8015e976fe8bb1d9708cd76bce1f1426981c..d633a0aeaa5525f8196eaa7881d258da402ba5d2 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -1307,7 +1307,7 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, * request had a non-zero tid. Work around this weirdness * by allocating a new message. 
*/ - /* fall through */ + fallthrough; case CEPH_MSG_MON_MAP: case CEPH_MSG_MDS_MAP: case CEPH_MSG_OSD_MAP: diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index e4fbcad6e7d83c580adc1396aed4bc8e61f9d0a1..7901ab6c79fd2aca7df36a998c2a23df860e2fe4 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -3854,7 +3854,7 @@ static void scan_requests(struct ceph_osd *osd, if (!force_resend && !force_resend_writes) break; - /* fall through */ + fallthrough; case CALC_TARGET_NEED_RESEND: cancel_linger_map_check(lreq); /* @@ -3891,7 +3891,7 @@ static void scan_requests(struct ceph_osd *osd, !force_resend_writes)) break; - /* fall through */ + fallthrough; case CALC_TARGET_NEED_RESEND: cancel_map_check(req); unlink_request(osd, req); diff --git a/net/core/dev.c b/net/core/dev.c index 7df6c9617321a3c5a25d9fdb5f661315677c555c..4086d335978c1bf62bd3965bd2ea96a4ac06b13d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4690,10 +4690,10 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(skb->dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: do_drop: kfree_skb(skb); @@ -6612,12 +6612,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, netdev_err_once(dev, "%s() called with weight %d\n", __func__, weight); napi->weight = weight; - list_add(&napi->dev_list, &dev->napi_list); napi->dev = dev; #ifdef CONFIG_NETPOLL napi->poll_owner = -1; #endif set_bit(NAPI_STATE_SCHED, &napi->state); + set_bit(NAPI_STATE_NPSVC, &napi->state); + list_add_rcu(&napi->dev_list, &dev->napi_list); napi_hash_add(napi); } EXPORT_SYMBOL(netif_napi_add); @@ -8742,13 +8743,15 @@ struct bpf_xdp_link { int flags; }; -static enum bpf_xdp_mode dev_xdp_mode(u32 flags) +static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags) { if (flags & XDP_FLAGS_HW_MODE) return XDP_MODE_HW; if (flags & XDP_FLAGS_DRV_MODE) return XDP_MODE_DRV; - return XDP_MODE_SKB; + if (flags & XDP_FLAGS_SKB_MODE) + return XDP_MODE_SKB; + return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB; } static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) @@ -8896,7 +8899,7 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack return -EINVAL; } - mode = dev_xdp_mode(flags); + mode = dev_xdp_mode(dev, flags); /* can't replace attached link */ if (dev_xdp_link(dev, mode)) { NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); @@ -8913,10 +8916,6 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack NL_SET_ERR_MSG(extack, "Active program does not match expected"); return -EEXIST; } - if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) { - NL_SET_ERR_MSG(extack, "XDP program already attached"); - return -EBUSY; - } /* put effective new program into new_prog */ if (link) @@ -8927,6 +8926,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB ? 
XDP_MODE_DRV : XDP_MODE_SKB; + if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) { + NL_SET_ERR_MSG(extack, "XDP program already attached"); + return -EBUSY; + } if (!offload && dev_xdp_prog(dev, other_mode)) { NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); return -EEXIST; @@ -8984,7 +8987,7 @@ static int dev_xdp_detach_link(struct net_device *dev, ASSERT_RTNL(); - mode = dev_xdp_mode(link->flags); + mode = dev_xdp_mode(dev, link->flags); if (dev_xdp_link(dev, mode) != link) return -EINVAL; @@ -9080,7 +9083,7 @@ static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, goto out_unlock; } - mode = dev_xdp_mode(xdp_link->flags); + mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags); bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, xdp_link->flags, new_prog); @@ -9164,7 +9167,7 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags) { - enum bpf_xdp_mode mode = dev_xdp_mode(flags); + enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags); struct bpf_prog *new_prog = NULL, *old_prog = NULL; int err; diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index b2cf9b7bb7b8c328259fec9934cf4def07831532..205e92e604ef7ef04fc8379789554cfb6308b9ef 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -322,7 +322,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) err = net_hwtstamp_validate(ifr); if (err) return err; - /* fall through */ + fallthrough; /* * Unknown or private ioctl @@ -478,7 +478,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c case SIOCSIFTXQLEN: if (!capable(CAP_NET_ADMIN)) return -EPERM; - /* fall through */ + fallthrough; /* * These ioctl calls: * - require local superuser power. 
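/*
 * Editor's note on the net/core/dev.c dev_xdp_mode() change above: when
 * user space passes none of the XDP_FLAGS_*_MODE flags, the kernel now
 * defaults to native (driver) XDP if the device implements ndo_bpf and
 * falls back to generic (skb) mode otherwise, rather than always using
 * generic mode. A minimal user-space sketch of picking a mode; it assumes
 * the libbpf API of this era (bpf_set_link_xdp_fd(), since superseded by
 * bpf_xdp_attach()) and a hypothetical attach_xdp() helper name:
 */
#include <bpf/libbpf.h>
#include <linux/if_link.h>

static int attach_xdp(int ifindex, int prog_fd)
{
	/* Force generic mode here; use XDP_FLAGS_DRV_MODE to force native
	 * mode, or pass 0 to let the kernel pick based on ndo_bpf support.
	 */
	return bpf_set_link_xdp_fd(ifindex, prog_fd, XDP_FLAGS_SKB_MODE);
}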
@@ -503,7 +503,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c case SIOCSHWTSTAMP: if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; - /* fall through */ + fallthrough; case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: dev_load(net, ifr->ifr_name); diff --git a/net/core/devlink.c b/net/core/devlink.c index e674f0f46dc2be64bc5b2b148004826ae3833dde..80ec1cd81c6475fd6c5dcb37baeb7203b2ef687d 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -4063,7 +4063,7 @@ static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id) { lockdep_assert_held(&devlink->lock); - if (WARN_ON(xa_load(&devlink->snapshot_ids, id))) + if (xa_load(&devlink->snapshot_ids, id)) return -EEXIST; return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(0), @@ -6196,8 +6196,8 @@ devlink_trap_action_get_from_info(struct genl_info *info, val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]); switch (val) { - case DEVLINK_TRAP_ACTION_DROP: /* fall-through */ - case DEVLINK_TRAP_ACTION_TRAP: /* fall-through */ + case DEVLINK_TRAP_ACTION_DROP: + case DEVLINK_TRAP_ACTION_TRAP: case DEVLINK_TRAP_ACTION_MIRROR: *p_trap_action = val; break; diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index b09bebeadf0b2921039413dea5f8be2edb556c65..9704522b08721c7c4cd37cfafb520aab3209fdf0 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -1189,7 +1189,7 @@ static int net_dm_alert_mode_get_from_info(struct genl_info *info, val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]); switch (val) { - case NET_DM_ALERT_MODE_SUMMARY: /* fall-through */ + case NET_DM_ALERT_MODE_SUMMARY: case NET_DM_ALERT_MODE_PACKET: *p_alert_mode = val; break; diff --git a/net/core/filter.c b/net/core/filter.c index 7124f0fe69747ff77de4654c7fb1fca98e71d98f..1f647ab986b614d719555b402ed00757fe268095 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8317,15 +8317,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, /* Helper macro for adding read access to tcp_sock or sock fields. 
*/ #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ do { \ + int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \ BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \ sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \ + if (si->dst_reg == reg || si->src_reg == reg) \ + reg--; \ + if (si->dst_reg == reg || si->src_reg == reg) \ + reg--; \ + if (si->dst_reg == si->src_reg) { \ + *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \ + offsetof(struct bpf_sock_ops_kern, \ + temp)); \ + fullsock_reg = reg; \ + jmp += 2; \ + } \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, \ is_fullsock), \ - si->dst_reg, si->src_reg, \ + fullsock_reg, si->src_reg, \ offsetof(struct bpf_sock_ops_kern, \ is_fullsock)); \ - *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \ + *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \ + if (si->dst_reg == si->src_reg) \ + *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, \ + temp)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, sk),\ si->dst_reg, si->src_reg, \ @@ -8334,6 +8350,49 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, OBJ_FIELD), \ si->dst_reg, si->dst_reg, \ offsetof(OBJ, OBJ_FIELD)); \ + if (si->dst_reg == si->src_reg) { \ + *insn++ = BPF_JMP_A(1); \ + *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, \ + temp)); \ + } \ + } while (0) + +#define SOCK_OPS_GET_SK() \ + do { \ + int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \ + if (si->dst_reg == reg || si->src_reg == reg) \ + reg--; \ + if (si->dst_reg == reg || si->src_reg == reg) \ + reg--; \ + if (si->dst_reg == si->src_reg) { \ + *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \ + offsetof(struct bpf_sock_ops_kern, \ + temp)); \ + fullsock_reg = reg; \ + jmp += 2; \ + } \ + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ + struct bpf_sock_ops_kern, \ + is_fullsock), \ + fullsock_reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, \ + is_fullsock)); \ + *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \ + if (si->dst_reg == si->src_reg) \ + *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, \ + temp)); \ + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ + struct bpf_sock_ops_kern, sk),\ + si->dst_reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, sk));\ + if (si->dst_reg == si->src_reg) { \ + *insn++ = BPF_JMP_A(1); \ + *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ + offsetof(struct bpf_sock_ops_kern, \ + temp)); \ + } \ } while (0) #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \ @@ -8620,17 +8679,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked); break; case offsetof(struct bpf_sock_ops, sk): - *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct bpf_sock_ops_kern, - is_fullsock), - si->dst_reg, si->src_reg, - offsetof(struct bpf_sock_ops_kern, - is_fullsock)); - *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); - *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( - struct bpf_sock_ops_kern, sk), - si->dst_reg, si->src_reg, - offsetof(struct bpf_sock_ops_kern, sk)); + SOCK_OPS_GET_SK(); break; } return insn - insn_buf; @@ -9174,7 +9223,7 @@ sk_reuseport_is_valid_access(int off, int size, case bpf_ctx_range(struct sk_reuseport_md, eth_protocol): if (size < sizeof_field(struct sk_buff, protocol)) return false; - /* fall through */ + fallthrough; case bpf_ctx_range(struct sk_reuseport_md, ip_protocol): case bpf_ctx_range(struct sk_reuseport_md, bind_inany): case 
bpf_ctx_range(struct sk_reuseport_md, len): diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 093e90e52bc25411806fe2f3d04ca27cab47ec32..2338753e936b7188d7d0ccb350cee8a34dd88172 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -162,7 +162,7 @@ static void poll_napi(struct net_device *dev) struct napi_struct *napi; int cpu = smp_processor_id(); - list_for_each_entry(napi, &dev->napi_list, dev_list) { + list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) { if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) { poll_one_napi(napi); smp_store_release(&napi->poll_owner, -1); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b53b6d38c4dff8b73423f19d7efc84b00166f136..44fdbb9c6e5347c08a624a07aa9fe616ab3ed4ae 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3430,7 +3430,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) net_info_ratelimited("%s xmit error: %d\n", pkt_dev->odevname, ret); pkt_dev->errors++; - /* fall through */ + fallthrough; case NETDEV_TX_BUSY: /* Retry it next time */ refcount_dec(&(pkt_dev->skb->users)); @@ -3699,7 +3699,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn) cpu_to_node(cpu), "kpktgend_%d", cpu); if (IS_ERR(p)) { - pr_err("kernel_thread() failed for cpu %d\n", t->cpu); + pr_err("kthread_create_on_node() failed for cpu %d\n", t->cpu); list_del(&t->th_list); kfree(t); return PTR_ERR(p); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7e2e502ef51983fadd6956b0fdbf421b0c9aa1a6..6faf73d6a0f7431cd96a853b652fce68916c477f 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -820,6 +820,7 @@ void skb_tx_error(struct sk_buff *skb) } EXPORT_SYMBOL(skb_tx_error); +#ifdef CONFIG_TRACEPOINTS /** * consume_skb - free an skbuff * @skb: buffer to free @@ -837,6 +838,7 @@ void consume_skb(struct sk_buff *skb) __kfree_skb(skb); } EXPORT_SYMBOL(consume_skb); +#endif /** * consume_stateless_skb - free an skbuff, assuming it is stateless @@ -5418,8 +5420,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb) skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) goto err_free; - - if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) + /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ + if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) goto err_free; vhdr = (struct vlan_hdr *)skb->data; @@ -5987,9 +5989,13 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, if (skb_has_frag_list(skb)) skb_clone_fraglist(skb); - if (k == 0) { - /* split line is in frag list */ - pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask); + /* split line is in frag list */ + if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { + /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. 
*/ + if (skb_has_frag_list(skb)) + kfree_skb_list(skb_shinfo(skb)->frag_list); + kfree(data); + return -ENOMEM; } skb_release_data(skb); diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 6a32a1fd34f8cd1306a43f1f14bd6e038ff2efe6..649583158983a9e79ffa236c86dc8a26bb9060fd 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -772,7 +772,6 @@ static void sk_psock_verdict_apply(struct sk_psock *psock, sk_psock_skb_redirect(skb); break; case __SK_DROP: - /* fall-through */ default: out_free: kfree_skb(skb); diff --git a/net/core/sock.c b/net/core/sock.c index e4f40b175acb7909602334301dd20c9de50a5e3e..6c5c6b18eff4c5271fd3d510f7a370122ab91964 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1008,7 +1008,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, break; case SO_TIMESTAMPING_NEW: sock_set_flag(sk, SOCK_TSTAMP_NEW); - /* fall through */ + fallthrough; case SO_TIMESTAMPING_OLD: if (val & ~SOF_TIMESTAMPING_MASK) { ret = -EINVAL; @@ -3254,7 +3254,7 @@ void sk_common_release(struct sock *sk) sk->sk_prot->destroy(sk); /* - * Observation: when sock_common_release is called, processes have + * Observation: when sk_common_release is called, processes have * no access to socket. But net still has. * Step one, detach it from networking: * diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index aef72f6a28291faaaddebfaf5b6161ff28848f6d..b9ee1a4a895548aa2d5023441505a59227f4106e 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -608,7 +608,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk, */ if (hc->rx_x_recv > 0) break; - /* fall through */ + fallthrough; case CCID3_FBACK_PERIODIC: delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback); if (delta <= 0) diff --git a/net/dccp/feat.c b/net/dccp/feat.c index afc071ea1271e476520388f8310664d56fa6eb67..788dd629c4204af3178b8283da804c828b6ec1e5 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -1407,7 +1407,8 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq, * Negotiation during connection setup */ case DCCP_LISTEN: - server = true; /* fall through */ + server = true; + fallthrough; case DCCP_REQUESTING: switch (opt) { case DCCPO_CHANGE_L: diff --git a/net/dccp/input.c b/net/dccp/input.c index bd9cfdb67436c226899b7270597b49f2bf88fd4f..2cbb757a894f87ef79d3435865f37a49d313d928 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -64,7 +64,7 @@ static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb) */ if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) break; - /* fall through */ + fallthrough; case DCCP_REQUESTING: case DCCP_ACTIVE_CLOSEREQ: dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED); @@ -76,7 +76,7 @@ static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb) queued = 1; dccp_fin(sk, skb); dccp_set_state(sk, DCCP_PASSIVE_CLOSE); - /* fall through */ + fallthrough; case DCCP_PASSIVE_CLOSE: /* * Retransmitted Close: we have already enqueued the first one. 
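/*
 * Editor's note on the net/core/skbuff.c skb_vlan_untag() hunk above:
 * struct vlan_hdr is only VLAN_HLEN (4) bytes, but vlan_set_encap_proto()
 * may also read the first two bytes of the encapsulated payload to tell
 * raw 802.3 apart from 802.2 LLC/SNAP, hence pulling
 * VLAN_HLEN + sizeof(unsigned short). A rough sketch of that check,
 * simplified from include/linux/if_vlan.h:
 */
static void vlan_set_encap_proto_sketch(struct sk_buff *skb,
					struct vlan_hdr *vhdr)
{
	__be16 proto = vhdr->h_vlan_encapsulated_proto;
	unsigned short *rawp;

	if (ntohs(proto) >= ETH_P_802_3_MIN) {
		skb->protocol = proto;		/* a real Ethertype */
		return;
	}

	rawp = (unsigned short *)(vhdr + 1);	/* the two extra bytes */
	if (*rawp == 0xFFFF)
		skb->protocol = htons(ETH_P_802_3);	/* "raw" 802.3 */
	else
		skb->protocol = htons(ETH_P_802_2);	/* 802.2 LLC/SNAP */
}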
@@ -113,7 +113,7 @@ static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb) queued = 1; dccp_fin(sk, skb); dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ); - /* fall through */ + fallthrough; case DCCP_PASSIVE_CLOSEREQ: sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); } @@ -530,7 +530,7 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk, case DCCP_PKT_DATA: if (sk->sk_state == DCCP_RESPOND) break; - /* fall through */ + fallthrough; case DCCP_PKT_DATAACK: case DCCP_PKT_ACK: /* @@ -684,7 +684,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, /* Step 8: if using Ack Vectors, mark packet acknowledgeable */ dccp_handle_ackvec_processing(sk, skb); dccp_deliver_input_to_ccids(sk, skb); - /* fall through */ + fallthrough; case DCCP_RESPOND: queued = dccp_rcv_respond_partopen_state_process(sk, skb, dh, len); diff --git a/net/dccp/options.c b/net/dccp/options.c index 51aaba7a5d4562ab019fa87b6c44b0b1d5d9e09d..d24cad05001e52f06889f3e9751eb91d384d2072 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -225,7 +225,7 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, * interested. The RX CCID need not parse Ack Vectors, * since it is only interested in clearing old state. */ - /* fall through */ + fallthrough; case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC: if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk, pkt_type, opt, value, len)) diff --git a/net/dccp/output.c b/net/dccp/output.c index 6433187a5cc442bbf8e2eb4ea694343eae11ceb8..50e6d5699bb2979f9a144e3dc62ef6f84aa02661 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -62,7 +62,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) switch (dcb->dccpd_type) { case DCCP_PKT_DATA: set_ack = 0; - /* fall through */ + fallthrough; case DCCP_PKT_DATAACK: case DCCP_PKT_RESET: break; @@ -72,12 +72,12 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) /* Use ISS on the first (non-retransmitted) Request. 
*/ if (icsk->icsk_retransmits == 0) dcb->dccpd_seq = dp->dccps_iss; - /* fall through */ + fallthrough; case DCCP_PKT_SYNC: case DCCP_PKT_SYNCACK: ackno = dcb->dccpd_ack_seq; - /* fall through */ + fallthrough; default: /* * Set owner/destructor: some skbs are allocated via @@ -481,7 +481,7 @@ struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb) case DCCP_RESET_CODE_PACKET_ERROR: dhr->dccph_reset_data[0] = rxdh->dccph_type; break; - case DCCP_RESET_CODE_OPTION_ERROR: /* fall through */ + case DCCP_RESET_CODE_OPTION_ERROR: case DCCP_RESET_CODE_MANDATORY_ERROR: memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3); break; diff --git a/net/dccp/proto.c b/net/dccp/proto.c index d148ab1530e57bde6f115cfbdc428659747f6dd9..6d705d90c6149c4d6043645addd9db94b7d25353 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -101,7 +101,7 @@ void dccp_set_state(struct sock *sk, const int state) if (inet_csk(sk)->icsk_bind_hash != NULL && !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) inet_put_port(sk); - /* fall through */ + fallthrough; default: if (oldstate == DCCP_OPEN) DCCP_DEC_STATS(DCCP_MIB_CURRESTAB); @@ -834,7 +834,7 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, case DCCP_PKT_CLOSEREQ: if (!(flags & MSG_PEEK)) dccp_finish_passive_close(sk); - /* fall through */ + fallthrough; case DCCP_PKT_RESET: dccp_pr_debug("found fin (%s) ok!\n", dccp_packet_name(dh->dccph_type)); @@ -960,7 +960,7 @@ static void dccp_terminate_connection(struct sock *sk) case DCCP_PARTOPEN: dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk); inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); - /* fall through */ + fallthrough; case DCCP_OPEN: dccp_send_close(sk, 1); @@ -969,7 +969,7 @@ static void dccp_terminate_connection(struct sock *sk) next_state = DCCP_ACTIVE_CLOSEREQ; else next_state = DCCP_CLOSING; - /* fall through */ + fallthrough; default: dccp_set_state(sk, next_state); } diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 3b53d766789d473a1a2c0d8a790f7515c36548be..5dbd45dc35ad3f44933b1fa1ba29ef4a7578249e 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -623,12 +623,12 @@ static void dn_destroy_sock(struct sock *sk) goto disc_reject; case DN_RUN: scp->state = DN_DI; - /* fall through */ + fallthrough; case DN_DI: case DN_DR: disc_reject: dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation); - /* fall through */ + fallthrough; case DN_NC: case DN_NR: case DN_RJ: @@ -642,7 +642,7 @@ static void dn_destroy_sock(struct sock *sk) break; default: printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n"); - /* fall through */ + fallthrough; case DN_O: dn_stop_slow_timer(sk); diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index c68503a180259467505bd1346995ea1bc00df755..c97bdca5ec30fc223af540363058f9ff94def019 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c @@ -483,7 +483,7 @@ static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb) break; case DN_RUN: sk->sk_shutdown |= SHUTDOWN_MASK; - /* fall through */ + fallthrough; case DN_CC: scp->state = DN_CN; } diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c index 33fefb0aebca4687a3d119f70ba56a587d238b17..4086f9c746af4eb9ce6c42bcf3be2bdfaa2317ee 100644 --- a/net/decnet/dn_table.c +++ b/net/decnet/dn_table.c @@ -156,7 +156,7 @@ static void dn_rehash_zone(struct dn_zone *dz) default: printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! 
%d\n", old_divisor); - /* fall through */ + fallthrough; case 256: new_divisor = 1024; new_hashmask = 0x3FF; diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c index deae519bdeecd986e15e4cf795d9380278eda560..67b5ab2657b7c9c0d20581a499e9f1bbbda798d5 100644 --- a/net/decnet/sysctl_net_decnet.c +++ b/net/decnet/sysctl_net_decnet.c @@ -75,7 +75,7 @@ static void strip_it(char *str) case '\r': case ':': *str = 0; - /* Fallthrough */ + fallthrough; case 0: return; } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 41d60eeefdbd99503d15cfb9546e9ef7b86cd894..9af1a2d0cec4fd500586379a57fec1266f12adbf 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -2009,7 +2009,7 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused, switchdev_work->event = event; switch (event) { - case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ + case SWITCHDEV_FDB_ADD_TO_DEVICE: case SWITCHDEV_FDB_DEL_TO_DEVICE: if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr)) goto err_fdb_work_init; diff --git a/net/ethtool/features.c b/net/ethtool/features.c index 4e632dc987d859d7599c1c8b9a084c9fd01c76ea..495635f152ba6838622b698c7e92ffa9e273eb07 100644 --- a/net/ethtool/features.c +++ b/net/ethtool/features.c @@ -224,7 +224,9 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info) DECLARE_BITMAP(wanted_diff_mask, NETDEV_FEATURE_COUNT); DECLARE_BITMAP(active_diff_mask, NETDEV_FEATURE_COUNT); DECLARE_BITMAP(old_active, NETDEV_FEATURE_COUNT); + DECLARE_BITMAP(old_wanted, NETDEV_FEATURE_COUNT); DECLARE_BITMAP(new_active, NETDEV_FEATURE_COUNT); + DECLARE_BITMAP(new_wanted, NETDEV_FEATURE_COUNT); DECLARE_BITMAP(req_wanted, NETDEV_FEATURE_COUNT); DECLARE_BITMAP(req_mask, NETDEV_FEATURE_COUNT); struct nlattr *tb[ETHTOOL_A_FEATURES_MAX + 1]; @@ -250,6 +252,7 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info) rtnl_lock(); ethnl_features_to_bitmap(old_active, dev->features); + ethnl_features_to_bitmap(old_wanted, dev->wanted_features); ret = ethnl_parse_bitset(req_wanted, req_mask, NETDEV_FEATURE_COUNT, tb[ETHTOOL_A_FEATURES_WANTED], netdev_features_strings, info->extack); @@ -261,17 +264,15 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info) goto out_rtnl; } - /* set req_wanted bits not in req_mask from old_active */ + /* set req_wanted bits not in req_mask from old_wanted */ bitmap_and(req_wanted, req_wanted, req_mask, NETDEV_FEATURE_COUNT); - bitmap_andnot(new_active, old_active, req_mask, NETDEV_FEATURE_COUNT); - bitmap_or(req_wanted, new_active, req_wanted, NETDEV_FEATURE_COUNT); - if (bitmap_equal(req_wanted, old_active, NETDEV_FEATURE_COUNT)) { - ret = 0; - goto out_rtnl; + bitmap_andnot(new_wanted, old_wanted, req_mask, NETDEV_FEATURE_COUNT); + bitmap_or(req_wanted, new_wanted, req_wanted, NETDEV_FEATURE_COUNT); + if (!bitmap_equal(req_wanted, old_wanted, NETDEV_FEATURE_COUNT)) { + dev->wanted_features &= ~dev->hw_features; + dev->wanted_features |= ethnl_bitmap_to_features(req_wanted) & dev->hw_features; + __netdev_update_features(dev); } - - dev->wanted_features = ethnl_bitmap_to_features(req_wanted); - __netdev_update_features(dev); ethnl_features_to_bitmap(new_active, dev->features); mod = !bitmap_equal(old_active, new_active, NETDEV_FEATURE_COUNT); diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index bbe9b3b2d3959fef28d1d94a0bcc0a71077d607d..be6f06adefe0a7ec46afcc3890d2bf387fcf3fba 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -195,7 
+195,7 @@ static int lowpan_frag_rx_handlers_result(struct sk_buff *skb, net_warn_ratelimited("%s: received unknown dispatch\n", __func__); - /* fall-through */ + fallthrough; default: /* all others failure */ return NET_RX_DROP; diff --git a/net/ieee802154/6lowpan/rx.c b/net/ieee802154/6lowpan/rx.c index b34d050c968716d80bb1a3db7841086dac80478d..517e6493f5d14f4d6187d9b7746370e06894a4a6 100644 --- a/net/ieee802154/6lowpan/rx.c +++ b/net/ieee802154/6lowpan/rx.c @@ -35,11 +35,11 @@ static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res) net_warn_ratelimited("%s: received unknown dispatch\n", __func__); - /* fall-through */ + fallthrough; case RX_DROP_UNUSABLE: kfree_skb(skb); - /* fall-through */ + fallthrough; case RX_DROP: return NET_RX_DROP; case RX_QUEUED: diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 60db5a6487cc5f1343f51f94107b4e1acf731d76..87983e70f03f320fffae872a13f6befb34467204 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -661,13 +661,13 @@ config TCP_CONG_BBR BBR (Bottleneck Bandwidth and RTT) TCP congestion control aims to maximize network utilization and minimize queues. It builds an explicit - model of the the bottleneck delivery rate and path round-trip - propagation delay. It tolerates packet loss and delay unrelated to - congestion. It can operate over LAN, WAN, cellular, wifi, or cable - modem links. It can coexist with flows that use loss-based congestion - control, and can operate with shallow buffers, deep buffers, - bufferbloat, policers, or AQM schemes that do not provide a delay - signal. It requires the fq ("Fair Queue") pacing packet scheduler. + model of the bottleneck delivery rate and path round-trip propagation + delay. It tolerates packet loss and delay unrelated to congestion. It + can operate over LAN, WAN, cellular, wifi, or cable modem links. It can + coexist with flows that use loss-based congestion control, and can + operate with shallow buffers, deep buffers, bufferbloat, policers, or + AQM schemes that do not provide a delay signal. It requires the fq + ("Fair Queue") pacing packet scheduler. choice prompt "Default TCP congestion control" diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index c89b46fec153b66b972598e0e7279f440cdf6932..ffc5332f139062525e92e1f03b197524842a02f1 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -2121,7 +2121,8 @@ void fib_info_notify_update(struct net *net, struct nl_info *info) struct hlist_head *head = &net->ipv4.fib_table_hash[h]; struct fib_table *tb; - hlist_for_each_entry_rcu(tb, head, tb_hlist) + hlist_for_each_entry_rcu(tb, head, tb_hlist, + lockdep_rtnl_is_held()) __fib_info_notify_update(net, tb, info); } } diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index 7afde8828b4c9c3f17f6ae3beea8d7f7d0b677eb..3f248a19faa30977b7551092b1787bbf887e8d21 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c @@ -3,7 +3,7 @@ * nf_nat_pptp.c * * NAT support for PPTP (Point to Point Tunneling Protocol). - * PPTP is a a protocol for creating virtual private networks. + * PPTP is a protocol for creating virtual private networks. * It is a specification defined by Microsoft and some vendors * working with Microsoft. PPTP is built on top of a modified * version of the Internet Generic Routing Encapsulation Protocol. 
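/*
 * Editor's note: the bulk of this diff replaces "fall through" marker
 * comments with the fallthrough pseudo-keyword, so that compilers (and not
 * only GCC's comment heuristic) can verify intentional switch-case
 * fall-through under -Wimplicit-fallthrough. For reference, the kernel
 * defines it roughly as below (see include/linux/compiler_attributes.h;
 * the exact guards vary by kernel version):
 */
#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif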
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index cc8049b100b2417f06fd23a669609123dd328f43..134e923822750a239055c8341ef7d971742b22af 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -446,7 +446,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[], unsigned int i, j; u8 nhg_fdb = 0; - if (len & (sizeof(struct nexthop_grp) - 1)) { + if (!len || len & (sizeof(struct nexthop_grp) - 1)) { NL_SET_ERR_MSG(extack, "Invalid length for nexthop group attribute"); return -EINVAL; @@ -1187,6 +1187,9 @@ static struct nexthop *nexthop_create_group(struct net *net, struct nexthop *nh; int i; + if (WARN_ON(!num_nh)) + return ERR_PTR(-EINVAL); + nh = nexthop_alloc(); if (!nh) return ERR_PTR(-ENOMEM); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 6fd4330287c279cb010827cff3790428df73cedc..407956be7deb30040546eb60bc9ae73b20253469 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -610,7 +610,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } else if (!ipc.oif) { ipc.oif = inet->uc_index; } else if (ipv4_is_lbcast(daddr) && inet->uc_index) { - /* oif is set, packet is to local broadcast and + /* oif is set, packet is to local broadcast * and uc_index is set. oif is most likely set * by sk_bound_dev_if. If uc_index != oif check if the * oif is an L3 master and uc_index is an L3 slave. diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 8e761b8c47c6902b14b0c6804954c45c0a5369b8..01146b66d6669f37abf884643caf891d9972db90 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1893,12 +1893,13 @@ EXPORT_SYMBOL(ipv6_chk_addr); * 2. does the address exist on the specific device * (skip_dev_check = false) */ -int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, - const struct net_device *dev, bool skip_dev_check, - int strict, u32 banned_flags) +static struct net_device * +__ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, + const struct net_device *dev, bool skip_dev_check, + int strict, u32 banned_flags) { unsigned int hash = inet6_addr_hash(net, addr); - const struct net_device *l3mdev; + struct net_device *l3mdev, *ndev; struct inet6_ifaddr *ifp; u32 ifp_flags; @@ -1909,10 +1910,11 @@ int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, dev = NULL; hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) { - if (!net_eq(dev_net(ifp->idev->dev), net)) + ndev = ifp->idev->dev; + if (!net_eq(dev_net(ndev), net)) continue; - if (l3mdev_master_dev_rcu(ifp->idev->dev) != l3mdev) + if (l3mdev_master_dev_rcu(ndev) != l3mdev) continue; /* Decouple optimistic from tentative for evaluation here. @@ -1923,15 +1925,23 @@ int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, : ifp->flags; if (ipv6_addr_equal(&ifp->addr, addr) && !(ifp_flags&banned_flags) && - (!dev || ifp->idev->dev == dev || + (!dev || ndev == dev || !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) { rcu_read_unlock(); - return 1; + return ndev; } } rcu_read_unlock(); - return 0; + return NULL; +} + +int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, + const struct net_device *dev, bool skip_dev_check, + int strict, u32 banned_flags) +{ + return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check, + strict, banned_flags) ? 1 : 0; } EXPORT_SYMBOL(ipv6_chk_addr_and_flags); @@ -1990,35 +2000,11 @@ EXPORT_SYMBOL(ipv6_chk_prefix); * * The caller should be protected by RCU, or RTNL. 
*/ -struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr) +struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr, + struct net_device *dev) { - unsigned int hash = inet6_addr_hash(net, addr); - struct inet6_ifaddr *ifp, *result = NULL; - struct net_device *dev = NULL; - - rcu_read_lock(); - hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) { - if (net_eq(dev_net(ifp->idev->dev), net) && - ipv6_addr_equal(&ifp->addr, addr)) { - result = ifp; - break; - } - } - - if (!result) { - struct rt6_info *rt; - - rt = rt6_lookup(net, addr, NULL, 0, NULL, 0); - if (rt) { - dev = rt->dst.dev; - ip6_rt_put(rt); - } - } else { - dev = result->idev->dev; - } - rcu_read_unlock(); - - return dev; + return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1, + IFA_F_TENTATIVE); } EXPORT_SYMBOL(ipv6_dev_find); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index f635914f42ec6057afe97fb03a78c4dd856886d9..a0217e5bf3bc1132969e0ed7421cf5eceff54d69 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -915,7 +915,15 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb, struct metadata_dst *tun_dst, bool log_ecn_err) { - return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate, + int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, + const struct ipv6hdr *ipv6h, + struct sk_buff *skb); + + dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate; + if (tpi->proto == htons(ETH_P_IP)) + dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate; + + return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate, log_ecn_err); } EXPORT_SYMBOL(ip6_tnl_rcv); diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 409e79b84a830dd22ffe7728f45d9dc65ec01df4..6d0e942d082d4ec0f66cb97fb20a7c466a0a0dfa 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -245,9 +245,6 @@ static const struct nf_ipv6_ops ipv6ops = { .route_input = ip6_route_input, .fragment = ip6_fragment, .reroute = nf_ip6_reroute, -#if IS_MODULE(CONFIG_IPV6) && IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) - .br_defrag = nf_ct_frag6_gather, -#endif #if IS_MODULE(CONFIG_IPV6) .br_fragment = br_ip6_fragment, #endif diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index fac2135aa47b6ff8c291a807581c63209b1f7629..5b60a4bdd36af0f4ed9159c1cadc3cb31aa2e75e 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -21,6 +21,7 @@ #include #endif +static int two = 2; static int flowlabel_reflect_max = 0x7; static int auto_flowlabels_min; static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX; @@ -150,7 +151,7 @@ static struct ctl_table ipv6_table_template[] = { .mode = 0644, .proc_handler = proc_rt6_multipath_hash_policy, .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, + .extra2 = &two, }, { .procname = "seg6_flowlabel", diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 6ee9851ac7c6801f4918398ec14d44ccf2f84c1d..a95af62acb52987122562ae2ca84a6902729dc09 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -418,7 +418,7 @@ static void iucv_sock_close(struct sock *sk) sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); } - /* fall through */ + fallthrough; case IUCV_DISCONN: sk->sk_state = IUCV_CLOSING; @@ -433,7 +433,7 @@ static void iucv_sock_close(struct sock *sk) iucv_sock_in_state(sk, IUCV_CLOSED, 0), timeo); } - /* fall through */ + fallthrough; case IUCV_CLOSING: sk->sk_state = IUCV_CLOSED; @@ -444,7 +444,7 @@ static void iucv_sock_close(struct sock *sk) skb_queue_purge(&iucv->send_skb_q); 
skb_queue_purge(&iucv->backlog_skb_q); - /* fall through */ + fallthrough; default: iucv_sever_path(sk, 1); @@ -2111,10 +2111,10 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, kfree_skb(skb); break; } - /* fall through - and receive non-zero length data */ + fallthrough; /* and receive non-zero length data */ case (AF_IUCV_FLAG_SHT): /* shutdown request */ - /* fall through - and receive zero length data */ + fallthrough; /* and receive zero length data */ case 0: /* plain data frame */ IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c index e71ca5aec684a42b321f31107e60903644216cbe..864326f150e2fcf76f9022c8ad5bb2cb0ac08a7c 100644 --- a/net/l3mdev/l3mdev.c +++ b/net/l3mdev/l3mdev.c @@ -154,7 +154,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex) EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu); /** - * l3mdev_fib_table - get FIB table id associated with an L3 + * l3mdev_fib_table_rcu - get FIB table id associated with an L3 * master interface * @dev: targeted interface */ diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c index 366f76c9003ddd1b125473218cd7daff445bcc80..314973033d038ba750525c1856e480bec41c87fd 100644 --- a/net/mac80211/airtime.c +++ b/net/mac80211/airtime.c @@ -405,18 +405,14 @@ ieee80211_calc_legacy_rate_duration(u16 bitrate, bool short_pre, return duration; } -u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw, - struct ieee80211_rx_status *status, - int len) +static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw, + struct ieee80211_rx_status *status, + u32 *overhead) { - struct ieee80211_supported_band *sband; - const struct ieee80211_rate *rate; bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI; - bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE; int bw, streams; int group, idx; u32 duration; - bool cck; switch (status->bw) { case RATE_INFO_BW_20: @@ -437,20 +433,6 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw, } switch (status->encoding) { - case RX_ENC_LEGACY: - if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ)) - return 0; - - sband = hw->wiphy->bands[status->band]; - if (!sband || status->rate_idx >= sband->n_bitrates) - return 0; - - rate = &sband->bitrates[status->rate_idx]; - cck = rate->flags & IEEE80211_RATE_MANDATORY_B; - - return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp, - cck, len); - case RX_ENC_VHT: streams = status->nss; idx = status->rate_idx; @@ -477,51 +459,144 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw, duration = airtime_mcs_groups[group].duration[idx]; duration <<= airtime_mcs_groups[group].shift; + *overhead = 36 + (streams << 2); + + return duration; +} + + +u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw, + struct ieee80211_rx_status *status, + int len) +{ + struct ieee80211_supported_band *sband; + u32 duration, overhead = 0; + + if (status->encoding == RX_ENC_LEGACY) { + const struct ieee80211_rate *rate; + bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE; + bool cck; + + if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ)) + return 0; + + sband = hw->wiphy->bands[status->band]; + if (!sband || status->rate_idx >= sband->n_bitrates) + return 0; + + rate = &sband->bitrates[status->rate_idx]; + cck = rate->flags & IEEE80211_RATE_MANDATORY_B; + + return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp, + cck, len); + } + + duration = ieee80211_get_rate_duration(hw, status, &overhead); + if (!duration) + return 0; + duration *= len; duration /= 
AVG_PKT_SIZE; duration /= 1024; - duration += 36 + (streams << 2); - - return duration; + return duration + overhead; } EXPORT_SYMBOL_GPL(ieee80211_calc_rx_airtime); -static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw, - struct ieee80211_tx_rate *rate, - u8 band, int len) +static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw, + struct ieee80211_rx_status *stat, u8 band, + struct rate_info *ri) { - struct ieee80211_rx_status stat = { - .band = band, - }; + struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; + int i; - if (rate->idx < 0 || !rate->count) + if (!ri || !sband) + return false; + + stat->bw = ri->bw; + stat->nss = ri->nss; + stat->rate_idx = ri->mcs; + + if (ri->flags & RATE_INFO_FLAGS_HE_MCS) + stat->encoding = RX_ENC_HE; + else if (ri->flags & RATE_INFO_FLAGS_VHT_MCS) + stat->encoding = RX_ENC_VHT; + else if (ri->flags & RATE_INFO_FLAGS_MCS) + stat->encoding = RX_ENC_HT; + else + stat->encoding = RX_ENC_LEGACY; + + if (ri->flags & RATE_INFO_FLAGS_SHORT_GI) + stat->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + stat->he_gi = ri->he_gi; + + if (stat->encoding != RX_ENC_LEGACY) + return true; + + stat->rate_idx = 0; + for (i = 0; i < sband->n_bitrates; i++) { + if (ri->legacy != sband->bitrates[i].bitrate) + continue; + + stat->rate_idx = i; + return true; + } + + return false; +} + +static int ieee80211_fill_rx_status(struct ieee80211_rx_status *stat, + struct ieee80211_hw *hw, + struct ieee80211_tx_rate *rate, + struct rate_info *ri, u8 band, int len) +{ + memset(stat, 0, sizeof(*stat)); + stat->band = band; + + if (ieee80211_fill_rate_info(hw, stat, band, ri)) return 0; + if (rate->idx < 0 || !rate->count) + return -1; + if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) - stat.bw = RATE_INFO_BW_80; + stat->bw = RATE_INFO_BW_80; else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - stat.bw = RATE_INFO_BW_40; + stat->bw = RATE_INFO_BW_40; else - stat.bw = RATE_INFO_BW_20; + stat->bw = RATE_INFO_BW_20; - stat.enc_flags = 0; + stat->enc_flags = 0; if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) - stat.enc_flags |= RX_ENC_FLAG_SHORTPRE; + stat->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (rate->flags & IEEE80211_TX_RC_SHORT_GI) - stat.enc_flags |= RX_ENC_FLAG_SHORT_GI; + stat->enc_flags |= RX_ENC_FLAG_SHORT_GI; - stat.rate_idx = rate->idx; + stat->rate_idx = rate->idx; if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { - stat.encoding = RX_ENC_VHT; - stat.rate_idx = ieee80211_rate_get_vht_mcs(rate); - stat.nss = ieee80211_rate_get_vht_nss(rate); + stat->encoding = RX_ENC_VHT; + stat->rate_idx = ieee80211_rate_get_vht_mcs(rate); + stat->nss = ieee80211_rate_get_vht_nss(rate); } else if (rate->flags & IEEE80211_TX_RC_MCS) { - stat.encoding = RX_ENC_HT; + stat->encoding = RX_ENC_HT; } else { - stat.encoding = RX_ENC_LEGACY; + stat->encoding = RX_ENC_LEGACY; } + return 0; +} + +static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw, + struct ieee80211_tx_rate *rate, + struct rate_info *ri, + u8 band, int len) +{ + struct ieee80211_rx_status stat; + + if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len)) + return 0; + return ieee80211_calc_rx_airtime(hw, &stat, len); } @@ -536,7 +611,7 @@ u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw, struct ieee80211_tx_rate *rate = &info->status.rates[i]; u32 cur_duration; - cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate, + cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate, NULL, info->band, len); if (!cur_duration) break; @@ -572,26 +647,41 @@ u32 ieee80211_calc_expected_tx_airtime(struct 
ieee80211_hw *hw, if (pubsta) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_rx_status stat; struct ieee80211_tx_rate *rate = &sta->tx_stats.last_rate; - u32 airtime; + struct rate_info *ri = &sta->tx_stats.last_rate_info; + u32 duration, overhead; + u8 agg_shift; - if (!(rate->flags & (IEEE80211_TX_RC_VHT_MCS | - IEEE80211_TX_RC_MCS))) - ampdu = false; + if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len)) + return 0; + if (stat.encoding == RX_ENC_LEGACY || !ampdu) + return ieee80211_calc_rx_airtime(hw, &stat, len); + + duration = ieee80211_get_rate_duration(hw, &stat, &overhead); /* * Assume that HT/VHT transmission on any AC except VO will * use aggregation. Since we don't have reliable reporting - * of aggregation length, assume an average of 16. + * of aggregation length, assume an average size based on the + * tx rate. * This will not be very accurate, but much better than simply - * assuming un-aggregated tx. + * assuming un-aggregated tx in all cases. */ - airtime = ieee80211_calc_tx_airtime_rate(hw, rate, band, - ampdu ? len * 16 : len); - if (ampdu) - airtime /= 16; - - return airtime; + if (duration > 400) /* <= VHT20 MCS2 1S */ + agg_shift = 1; + else if (duration > 250) /* <= VHT20 MCS3 1S or MCS1 2S */ + agg_shift = 2; + else if (duration > 150) /* <= VHT20 MCS5 1S or MCS3 2S */ + agg_shift = 3; + else + agg_shift = 4; + + duration *= len; + duration /= AVG_PKT_SIZE; + duration /= 1024; + + return duration + (overhead >> agg_shift); } if (!conf) diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 9d398c9daa4c649a02bf9bf959d5e523f46fc700..d5010116cf4d130569f5e62077e74ea7621ee7dd 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -524,7 +524,7 @@ struct ieee80211_sta_rx_stats { * @status_stats.retry_failed: # of frames that failed after retry * @status_stats.retry_count: # of retries attempted * @status_stats.lost_packets: # of lost packets - * @status_stats.last_tdls_pkt_time: timestamp of last TDLS packet + * @status_stats.last_pkt_time: timestamp of last ACKed packet * @status_stats.msdu_retries: # of MSDU retries * @status_stats.msdu_failed: # of failed MSDUs * @status_stats.last_ack: last ack timestamp (jiffies) @@ -597,7 +597,7 @@ struct sta_info { unsigned long filtered; unsigned long retry_failed, retry_count; unsigned int lost_packets; - unsigned long last_tdls_pkt_time; + unsigned long last_pkt_time; u64 msdu_retries[IEEE80211_NUM_TIDS + 1]; u64 msdu_failed[IEEE80211_NUM_TIDS + 1]; unsigned long last_ack; @@ -611,6 +611,7 @@ struct sta_info { u64 packets[IEEE80211_NUM_ACS]; u64 bytes[IEEE80211_NUM_ACS]; struct ieee80211_tx_rate last_rate; + struct rate_info last_rate_info; u64 msdu[IEEE80211_NUM_TIDS + 1]; } tx_stats; u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; diff --git a/net/mac80211/status.c b/net/mac80211/status.c index adb1d30ce06ef3dffae17c79f27f9e6ead3b2727..0794396a79884bba6a83490c15bd193e15163f82 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -755,12 +755,16 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local, * - current throughput (higher value for higher tpt)? 
*/ #define STA_LOST_PKT_THRESHOLD 50 +#define STA_LOST_PKT_TIME HZ /* 1 sec since last ACK */ #define STA_LOST_TDLS_PKT_THRESHOLD 10 #define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */ static void ieee80211_lost_packet(struct sta_info *sta, struct ieee80211_tx_info *info) { + unsigned long pkt_time = STA_LOST_PKT_TIME; + unsigned int pkt_thr = STA_LOST_PKT_THRESHOLD; + /* If driver relies on its own algorithm for station kickout, skip * mac80211 packet loss mechanism. */ @@ -773,21 +777,20 @@ static void ieee80211_lost_packet(struct sta_info *sta, return; sta->status_stats.lost_packets++; - if (!sta->sta.tdls && - sta->status_stats.lost_packets < STA_LOST_PKT_THRESHOLD) - return; + if (sta->sta.tdls) { + pkt_time = STA_LOST_TDLS_PKT_TIME; + pkt_thr = STA_LOST_PKT_THRESHOLD; + } /* * If we're in TDLS mode, make sure that all STA_LOST_TDLS_PKT_THRESHOLD * of the last packets were lost, and that no ACK was received in the * last STA_LOST_TDLS_PKT_TIME ms, before triggering the CQM packet-loss * mechanism. + * For non-TDLS, use STA_LOST_PKT_THRESHOLD and STA_LOST_PKT_TIME */ - if (sta->sta.tdls && - (sta->status_stats.lost_packets < STA_LOST_TDLS_PKT_THRESHOLD || - time_before(jiffies, - sta->status_stats.last_tdls_pkt_time + - STA_LOST_TDLS_PKT_TIME))) + if (sta->status_stats.lost_packets < pkt_thr || + !time_after(jiffies, sta->status_stats.last_pkt_time + pkt_time)) return; cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr, @@ -1033,9 +1036,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, sta->status_stats.lost_packets = 0; /* Track when last TDLS packet was ACKed */ - if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) - sta->status_stats.last_tdls_pkt_time = - jiffies; + sta->status_stats.last_pkt_time = jiffies; } else if (noack_success) { /* nothing to do here, do not account as lost */ } else { @@ -1137,9 +1138,17 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, struct ieee80211_tx_info *info = status->info; struct ieee80211_sta *pubsta = status->sta; struct ieee80211_supported_band *sband; + struct sta_info *sta; int retry_count; bool acked, noack_success; + if (pubsta) { + sta = container_of(pubsta, struct sta_info, sta); + + if (status->rate) + sta->tx_stats.last_rate_info = *status->rate; + } + if (status->skb) return __ieee80211_tx_status(hw, status); @@ -1154,10 +1163,6 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED); if (pubsta) { - struct sta_info *sta; - - sta = container_of(pubsta, struct sta_info, sta); - if (!acked && !noack_success) sta->status_stats.retry_failed++; sta->status_stats.retry_count += retry_count; @@ -1168,9 +1173,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, if (sta->status_stats.lost_packets) sta->status_stats.lost_packets = 0; - /* Track when last TDLS packet was ACKed */ - if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) - sta->status_stats.last_tdls_pkt_time = jiffies; + /* Track when last packet was ACKed */ + sta->status_stats.last_pkt_time = jiffies; } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) { return; } else if (noack_success) { @@ -1259,8 +1263,7 @@ void ieee80211_tx_status_8023(struct ieee80211_hw *hw, if (sta->status_stats.lost_packets) sta->status_stats.lost_packets = 0; - if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) - sta->status_stats.last_tdls_pkt_time = jiffies; + sta->status_stats.last_pkt_time = jiffies; } else { ieee80211_lost_packet(sta, info); } diff --git a/net/mpls/af_mpls.c 
b/net/mpls/af_mpls.c index 6fdd0c9f865a5d73d9d08759fbcc2edd42d628d8..f2868a8a50c302d98bf6fbf3b1b13130e13fa05b 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -1516,7 +1516,7 @@ static void mpls_ifdown(struct net_device *dev, int event) case NETDEV_DOWN: case NETDEV_UNREGISTER: nh_flags |= RTNH_F_DEAD; - /* fall through */ + fallthrough; case NETDEV_CHANGE: nh_flags |= RTNH_F_LINKDOWN; break; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 8c1d1a5957017f7cf07d712679e09b35ac03b844..365ba96c84b06dda64eba57b1e811cf9478f97de 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -193,7 +193,6 @@ static void mptcp_check_data_fin_ack(struct sock *sk) sk->sk_state_change(sk); break; case TCP_CLOSING: - fallthrough; case TCP_LAST_ACK: inet_sk_state_store(sk, TCP_CLOSE); sk->sk_state_change(sk); @@ -725,8 +724,10 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, if (!psize) return -EINVAL; - if (!sk_wmem_schedule(sk, psize + dfrag->overhead)) + if (!sk_wmem_schedule(sk, psize + dfrag->overhead)) { + iov_iter_revert(&msg->msg_iter, psize); return -ENOMEM; + } } else { offset = dfrag->offset; psize = min_t(size_t, dfrag->data_len, avail_size); @@ -737,8 +738,11 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, */ ret = do_tcp_sendpages(ssk, page, offset, psize, msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT); - if (ret <= 0) + if (ret <= 0) { + if (!retransmission) + iov_iter_revert(&msg->msg_iter, psize); return ret; + } frag_truesize += ret; if (!retransmission) { @@ -887,7 +891,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) goto out; } -wait_for_sndbuf: __mptcp_flush_join_list(msk); ssk = mptcp_subflow_get_send(msk); while (!sk_stream_memory_free(sk) || @@ -977,7 +980,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) */ mptcp_set_timeout(sk, ssk); release_sock(ssk); - goto wait_for_sndbuf; + goto restart; } } } @@ -1388,7 +1391,9 @@ static void mptcp_worker(struct work_struct *work) struct mptcp_data_frag *dfrag; u64 orig_write_seq; size_t copied = 0; - struct msghdr msg; + struct msghdr msg = { + .msg_flags = MSG_DONTWAIT, + }; long timeo = 0; lock_sock(sk); @@ -1421,7 +1426,6 @@ static void mptcp_worker(struct work_struct *work) lock_sock(ssk); - msg.msg_flags = MSG_DONTWAIT; orig_len = dfrag->data_len; orig_offset = dfrag->offset; orig_write_seq = dfrag->data_seq; @@ -1535,7 +1539,7 @@ static void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) case TCP_LISTEN: if (!(how & RCV_SHUTDOWN)) break; - /* fall through */ + fallthrough; case TCP_SYN_SENT: tcp_disconnect(ssk, O_NONBLOCK); break; diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c index 1f387be7827bed4ebc6c64958f4c43238761cb8f..f1be3e3f6425e253df876ceee8a490e3eed98765 100644 --- a/net/ncsi/ncsi-manage.c +++ b/net/ncsi/ncsi-manage.c @@ -474,7 +474,7 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp) switch (nd->state) { case ncsi_dev_state_suspend: nd->state = ncsi_dev_state_suspend_select; - /* Fall through */ + fallthrough; case ncsi_dev_state_suspend_select: ndp->pending_req_num = 1; @@ -1302,7 +1302,7 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp) switch (nd->state) { case ncsi_dev_state_probe: nd->state = ncsi_dev_state_probe_deselect; - /* Fall through */ + fallthrough; case ncsi_dev_state_probe_deselect: ndp->pending_req_num = 8; diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index 
32b028853a7cf53191618c04dc2eb3c498defabb..dc2e7da2742abde9e72ea5d1400a1e9fb84bd2dc 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -315,7 +315,7 @@ tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) switch (skb->ip_summed) { case CHECKSUM_NONE: skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); - /* fall through */ + fallthrough; case CHECKSUM_COMPLETE: #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c index 153d89647c8747a957b66093ab7f787b58b431b2..68260d91c98870e74ccae77db3b0f496e3d194d2 100644 --- a/net/netfilter/ipvs/ip_vs_proto_udp.c +++ b/net/netfilter/ipvs/ip_vs_proto_udp.c @@ -318,7 +318,7 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) case CHECKSUM_NONE: skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); - /* fall through */ + fallthrough; case CHECKSUM_COMPLETE: #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 1f44d523b5121c84057f9f66dc902203090dbd57..5105d4250012ceecacba98b008f8c768fcd28dfa 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Connection tracking support for PPTP (Point to Point Tunneling Protocol). - * PPTP is a a protocol for creating virtual private networks. + * PPTP is a protocol for creating virtual private networks. * It is a specification defined by Microsoft and some vendors * working with Microsoft. PPTP is built on top of a modified * version of the Internet Generic Routing Encapsulation Protocol. diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 4f897b14b6069d4e15df5d22fa89a47e9a892fab..810cca24b399019862f185a65d064aa56aa8a21c 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -62,6 +62,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS, }; +#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1 + #define sNO SCTP_CONNTRACK_NONE #define sCL SCTP_CONNTRACK_CLOSED #define sCW SCTP_CONNTRACK_COOKIE_WAIT @@ -369,6 +371,7 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, u_int32_t offset, count; unsigned int *timeouts; unsigned long map[256 / sizeof(unsigned long)] = { 0 }; + bool ignore = false; if (sctp_error(skb, dataoff, state)) return -NF_ACCEPT; @@ -427,15 +430,39 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, /* Sec 8.5.1 (D) */ if (sh->vtag != ct->proto.sctp.vtag[dir]) goto out_unlock; - } else if (sch->type == SCTP_CID_HEARTBEAT || - sch->type == SCTP_CID_HEARTBEAT_ACK) { + } else if (sch->type == SCTP_CID_HEARTBEAT) { + if (ct->proto.sctp.vtag[dir] == 0) { + pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir); + ct->proto.sctp.vtag[dir] = sh->vtag; + } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { + if (test_bit(SCTP_CID_DATA, map) || ignore) + goto out_unlock; + + ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + ct->proto.sctp.last_dir = dir; + ignore = true; + continue; + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + } + } else if (sch->type == SCTP_CID_HEARTBEAT_ACK) { if (ct->proto.sctp.vtag[dir] == 0) { pr_debug("Setting vtag %x for dir %d\n", sh->vtag, dir); ct->proto.sctp.vtag[dir] = 
sh->vtag; } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { - pr_debug("Verification tag check failed\n"); - goto out_unlock; + if (test_bit(SCTP_CID_DATA, map) || ignore) + goto out_unlock; + + if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 || + ct->proto.sctp.last_dir == dir) + goto out_unlock; + + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + ct->proto.sctp.vtag[dir] = sh->vtag; + ct->proto.sctp.vtag[!dir] = 0; + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; } } @@ -470,6 +497,10 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, } spin_unlock_bh(&ct->lock); + /* allow but do not refresh timeout */ + if (ignore) + return NF_ACCEPT; + timeouts = nf_ct_timeout_lookup(ct); if (!timeouts) timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts; diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 6892e497781c12235350ce1f6e3f9d573d6d4ba4..e8c86ee4c1c482a8674a8fd52e696e745b60e9f7 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -1152,7 +1152,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, && (old_state == TCP_CONNTRACK_SYN_RECV || old_state == TCP_CONNTRACK_ESTABLISHED) && new_state == TCP_CONNTRACK_ESTABLISHED) { - /* Set ASSURED if we see see valid ack in ESTABLISHED + /* Set ASSURED if we see valid ack in ESTABLISHED after SYN_RECV or a valid answer for a picked up connection. */ set_bit(IPS_ASSURED_BIT, &ct->status); diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 760ca242281655590ddf0a20ec25c8b73930e06f..af402f458ee020da5946327a23ac6b5f68d7b5a0 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -81,18 +81,6 @@ static bool udp_error(struct sk_buff *skb, return false; } -static void nf_conntrack_udp_refresh_unreplied(struct nf_conn *ct, - struct sk_buff *skb, - enum ip_conntrack_info ctinfo, - u32 extra_jiffies) -{ - if (unlikely(ctinfo == IP_CT_ESTABLISHED_REPLY && - ct->status & IPS_NAT_CLASH)) - nf_ct_kill(ct); - else - nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies); -} - /* Returns verdict for packet, and may modify conntracktype */ int nf_conntrack_udp_packet(struct nf_conn *ct, struct sk_buff *skb, @@ -124,12 +112,15 @@ int nf_conntrack_udp_packet(struct nf_conn *ct, nf_ct_refresh_acct(ct, ctinfo, skb, extra); + /* never set ASSURED for IPS_NAT_CLASH, they time out soon */ + if (unlikely((ct->status & IPS_NAT_CLASH))) + return NF_ACCEPT; + /* Also, more likely to be important, and not a probe */ if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) nf_conntrack_event_cache(IPCT_ASSURED, ct); } else { - nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo, - timeouts[UDP_CT_UNREPLIED]); + nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]); } return NF_ACCEPT; } @@ -206,12 +197,15 @@ int nf_conntrack_udplite_packet(struct nf_conn *ct, if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_REPLIED]); + + if (unlikely((ct->status & IPS_NAT_CLASH))) + return NF_ACCEPT; + /* Also, more likely to be important, and not a probe */ if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) nf_conntrack_event_cache(IPCT_ASSURED, ct); } else { - nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo, - timeouts[UDP_CT_UNREPLIED]); + nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]); } return NF_ACCEPT; } diff --git 
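The SCTP conntrack hunks above stop killing a connection on the first heartbeat vtag mismatch: the mismatch is recorded in SCTP_FLAG_HEARTBEAT_VTAG_FAILED along with its direction, the packet is accepted without refreshing the timeout, and only a HEARTBEAT_ACK from the opposite direction may then re-learn the tags. A compressed userspace model of that re-learning rule — struct sctp_ct and heartbeat_ack_ok() are invented for this sketch, and the surrounding conntrack machinery is elided:

/* Userspace model of the heartbeat vtag re-learning flow. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1

struct sctp_ct {
	uint32_t vtag[2];	/* one verification tag per direction */
	uint8_t flags;
	uint8_t last_dir;	/* direction that saw the failed heartbeat */
};

/* Returns true if the HEARTBEAT_ACK may pass; may rewrite the tags. */
static bool heartbeat_ack_ok(struct sctp_ct *ct, int dir, uint32_t vtag)
{
	if (ct->vtag[dir] == 0) {
		ct->vtag[dir] = vtag;	/* first sighting: learn the tag */
		return true;
	}
	if (vtag != ct->vtag[dir]) {
		/* Only accept if the opposite direction already failed. */
		if (!(ct->flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) ||
		    ct->last_dir == dir)
			return false;
		ct->flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
		ct->vtag[dir] = vtag;	/* re-learn this direction... */
		ct->vtag[!dir] = 0;	/* ...and force the other to re-learn */
		return true;
	}
	ct->flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
	return true;
}

int main(void)
{
	struct sctp_ct ct = { .vtag = { 7, 9 },
			      .flags = SCTP_FLAG_HEARTBEAT_VTAG_FAILED,
			      .last_dir = 0 };
	printf("%d\n", heartbeat_ack_ok(&ct, 1, 42));	/* 1: tags re-learned */
	return 0;
}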
a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index d878e34e3354bda47d56e642c20e344592b51e32..b7dc1cbf40ea4498ad205f3598a4fbdae425c7aa 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -815,11 +815,11 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0, family, table); if (err < 0) - goto err; + goto err_fill_table_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_table_info: kfree_skb(skb2); return err; } @@ -1563,11 +1563,11 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0, family, table, chain); if (err < 0) - goto err; + goto err_fill_chain_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_chain_info: kfree_skb(skb2); return err; } @@ -2018,8 +2018,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (nla[NFTA_CHAIN_NAME]) { chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL); } else { - if (!(flags & NFT_CHAIN_BINDING)) - return -EINVAL; + if (!(flags & NFT_CHAIN_BINDING)) { + err = -EINVAL; + goto err1; + } snprintf(name, sizeof(name), "__chain%llu", ++chain_id); chain->name = kstrdup(name, GFP_KERNEL); @@ -3006,11 +3008,11 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0, family, table, chain, rule, NULL); if (err < 0) - goto err; + goto err_fill_rule_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_rule_info: kfree_skb(skb2); return err; } @@ -3768,7 +3770,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, goto nla_put_failure; } - if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) + if (set->udata && + nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) goto nla_put_failure; nest = nla_nest_start_noflag(skb, NFTA_SET_DESC); @@ -3965,11 +3968,11 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk, err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0); if (err < 0) - goto err; + goto err_fill_set_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_set_info: kfree_skb(skb2); return err; } @@ -4857,24 +4860,18 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, err = -ENOMEM; skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (skb == NULL) - goto err1; + return err; err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid, NFT_MSG_NEWSETELEM, 0, set, &elem); if (err < 0) - goto err2; + goto err_fill_setelem; - err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT); - /* This avoids a loop in nfnetlink. */ - if (err < 0) - goto err1; + return nfnetlink_unicast(skb, ctx->net, ctx->portid); - return 0; -err2: +err_fill_setelem: kfree_skb(skb); -err1: - /* this avoids a loop in nfnetlink. */ - return err == -EAGAIN ? 
-ENOBUFS : err; + return err; } /* called with rcu_read_lock held */ @@ -6179,10 +6176,11 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0, family, table, obj, reset); if (err < 0) - goto err; + goto err_fill_obj_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_obj_info: kfree_skb(skb2); return err; } @@ -7042,10 +7040,11 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk, NFT_MSG_NEWFLOWTABLE, 0, family, flowtable, &flowtable->hook_list); if (err < 0) - goto err; + goto err_fill_flowtable_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_flowtable_info: kfree_skb(skb2); return err; } @@ -7231,10 +7230,11 @@ static int nf_tables_getgen(struct net *net, struct sock *nlsk, err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq); if (err < 0) - goto err; + goto err_fill_gen_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_gen_info: kfree_skb(skb2); return err; } diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 5f24edf958309f46afcdfa7428498ca7769841f9..3a2e64e13b227ba9431253e3f8587cc9cd3a7cd6 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -149,10 +149,15 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error) } EXPORT_SYMBOL_GPL(nfnetlink_set_err); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags) +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid) { - return netlink_unicast(net->nfnl, skb, portid, flags); + int err; + + err = nlmsg_unicast(net->nfnl, skb, portid); + if (err == -EAGAIN) + err = -ENOBUFS; + + return err; } EXPORT_SYMBOL_GPL(nfnetlink_unicast); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index f029924198503cdf81339fe9dbd1cfbe68d7e73b..b35e8d9a5b37ec14cc6eb39e55d0c1ade1791da8 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -356,8 +356,7 @@ __nfulnl_send(struct nfulnl_instance *inst) goto out; } } - nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, - MSG_DONTWAIT); + nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid); out: inst->qlen = 0; inst->skb = NULL; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index dadfc06245a36fd0f253e7e94093778add272f96..d1d8bca03b4f0b9048c7f01a4c16d59247021d09 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -681,7 +681,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, *packet_id_ptr = htonl(entry->id); /* nfnetlink_unicast will either free the nskb or add it to a socket */ - err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); + err = nfnetlink_unicast(nskb, net, queue->peer_portid); if (err < 0) { if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { failopen = 1; diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 6428856ccbecaf1b3668934ba707553b7e9d8f33..8e56f353ff35120d765f64ea70d3df9b56af7d37 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -27,8 +27,6 @@ struct nft_xt_match_priv { void *info; }; -static refcount_t nft_compat_pending_destroy = REFCOUNT_INIT(1); - static int 
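The get-path hunks above all switch from nlmsg_unicast() to nfnetlink_unicast(), whose rewritten body maps -EAGAIN to -ENOBUFS once, centrally, instead of each caller repeating the "this avoids a loop in nfnetlink" remapping by hand. A userspace sketch of that wrapper pattern — nlmsg_unicast_stub() and nfnetlink_unicast_model() are stand-ins for the kernel functions:

/* Userspace sketch of the centralized -EAGAIN remapping. */
#include <errno.h>
#include <stdio.h>

static int nlmsg_unicast_stub(int portid)
{
	(void)portid;
	return -EAGAIN;		/* pretend the receive buffer is full */
}

static int nfnetlink_unicast_model(int portid)
{
	int err = nlmsg_unicast_stub(portid);

	if (err == -EAGAIN)
		err = -ENOBUFS;	/* single, central remapping: no retry loop */
	return err;
}

int main(void)
{
	printf("%d\n", nfnetlink_unicast_model(1) == -ENOBUFS);	/* 1 */
	return 0;
}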
nft_compat_chain_validate_dependency(const struct nft_ctx *ctx, const char *tablename) { @@ -215,6 +213,17 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv) return 0; } +static void nft_compat_wait_for_destructors(void) +{ + /* xtables matches or targets can have side effects, e.g. + * creation/destruction of /proc files. + * The xt ->destroy functions are run asynchronously from + * work queue. If we have pending invocations we thus + * need to wait for those to finish. + */ + nf_tables_trans_destroy_flush_work(); +} + static int nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) @@ -238,14 +247,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv); - /* xtables matches or targets can have side effects, e.g. - * creation/destruction of /proc files. - * The xt ->destroy functions are run asynchronously from - * work queue. If we have pending invocations we thus - * need to wait for those to finish. - */ - if (refcount_read(&nft_compat_pending_destroy) > 1) - nf_tables_trans_destroy_flush_work(); + nft_compat_wait_for_destructors(); ret = xt_check_target(&par, size, proto, inv); if (ret < 0) @@ -260,7 +262,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, static void __nft_mt_tg_destroy(struct module *me, const struct nft_expr *expr) { - refcount_dec(&nft_compat_pending_destroy); module_put(me); kfree(expr->ops); } @@ -468,6 +469,8 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv); + nft_compat_wait_for_destructors(); + return xt_check_match(&par, size, proto, inv); } @@ -716,14 +719,6 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = { static struct nft_expr_type nft_match_type; -static void nft_mt_tg_deactivate(const struct nft_ctx *ctx, - const struct nft_expr *expr, - enum nft_trans_phase phase) -{ - if (phase == NFT_TRANS_COMMIT) - refcount_inc(&nft_compat_pending_destroy); -} - static const struct nft_expr_ops * nft_match_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) @@ -762,7 +757,6 @@ nft_match_select_ops(const struct nft_ctx *ctx, ops->type = &nft_match_type; ops->eval = nft_match_eval; ops->init = nft_match_init; - ops->deactivate = nft_mt_tg_deactivate, ops->destroy = nft_match_destroy; ops->dump = nft_match_dump; ops->validate = nft_match_validate; @@ -853,7 +847,6 @@ nft_target_select_ops(const struct nft_ctx *ctx, ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); ops->init = nft_target_init; ops->destroy = nft_target_destroy; - ops->deactivate = nft_mt_tg_deactivate, ops->dump = nft_target_dump; ops->validate = nft_target_validate; ops->data = target; @@ -917,8 +910,6 @@ static void __exit nft_compat_module_exit(void) nfnetlink_subsys_unregister(&nfnl_compat_subsys); nft_unregister_expr(&nft_target_type); nft_unregister_expr(&nft_match_type); - - WARN_ON_ONCE(refcount_read(&nft_compat_pending_destroy) != 1); } MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT); diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index 07782836fad6ead16711db91117d837abbf66765..3c48cdc8935df7030e983581c56bd0957d71175f 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -44,7 +44,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr, err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); if 
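The nft_compat hunks above drop the nft_compat_pending_destroy refcount and its ->deactivate hooks: both expression init paths now unconditionally flush the transaction destroy work queue before running the xtables check hooks. A toy userspace model of that simplification — the counter stands in for pending asynchronous ->destroy work and flush_work_stub() for nf_tables_trans_destroy_flush_work():

/* Userspace sketch: flush pending destructors before re-checking. */
#include <stdio.h>

static int pending_destructors = 2;

static void flush_work_stub(void)
{
	pending_destructors = 0;	/* wait for async ->destroy to finish */
}

static void wait_for_destructors_model(void)
{
	/* xtables ->destroy handlers may create or remove /proc entries,
	 * so any pending ones must finish before xt_check_*() runs.
	 */
	flush_work_stub();
}

static int target_init_model(void)
{
	wait_for_destructors_model();
	return pending_destructors;	/* 0: safe to run xt_check_target() */
}

int main(void)
{
	printf("%d\n", target_init_model());
	return 0;
}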
(priv->flags & NFT_EXTHDR_F_PRESENT) { - *dest = (err >= 0); + nft_reg_store8(dest, err >= 0); return; } else if (err < 0) { goto err; @@ -141,7 +141,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr, err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type); if (priv->flags & NFT_EXTHDR_F_PRESENT) { - *dest = (err >= 0); + nft_reg_store8(dest, err >= 0); return; } else if (err < 0) { goto err; diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index 3b9b97aa4b32e3550358033bfb7208091424f261..3a6c84fb2c90dc09e352113e016c0a26e076f272 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -102,7 +102,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, } if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) || - ct->status & IPS_SEQ_ADJUST) + ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH)) goto out; if (!nf_ct_is_confirmed(ct)) diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index ed7cb9f747f60fd624d2ddc814bb59d68ea4cc6a..7a2e59638499133e38712c79a035d71c63ea5cd6 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -87,7 +87,9 @@ void nft_payload_eval(const struct nft_expr *expr, u32 *dest = ®s->data[priv->dreg]; int offset; - dest[priv->len / NFT_REG32_SIZE] = 0; + if (priv->len % NFT_REG32_SIZE) + dest[priv->len / NFT_REG32_SIZE] = 0; + switch (priv->base) { case NFT_PAYLOAD_LL_HEADER: if (!skb_mac_header_was_set(skb)) diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 4b2834fd17b26e47bce3883b92722bc0180922f8..217ab3644c25b1ae95c1609433fdb964c7d41349 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -218,11 +218,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, struct nft_rbtree_elem *new, struct nft_set_ext **ext) { + bool overlap = false, dup_end_left = false, dup_end_right = false; struct nft_rbtree *priv = nft_set_priv(set); u8 genmask = nft_genmask_next(net); struct nft_rbtree_elem *rbe; struct rb_node *parent, **p; - bool overlap = false; int d; /* Detect overlaps as we descend the tree. Set the flag in these cases: @@ -238,24 +238,44 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, * * b1. _ _ __>| !_ _ __| (insert end before existing start) * b2. _ _ ___| !_ _ _>| (insert end after existing start) - * b3. _ _ ___! >|_ _ __| (insert start after existing end) + * b3. _ _ ___! >|_ _ __| (insert start after existing end, as a leaf) + * '--' no nodes falling in this range + * b4. >|_ _ ! (insert start before existing start) * * Case a3. resolves to b3.: * - if the inserted start element is the leftmost, because the '0' * element in the tree serves as end element - * - otherwise, if an existing end is found. Note that end elements are - * always inserted after corresponding start elements. + * - otherwise, if an existing end is found immediately to the left. If + * there are existing nodes in between, we need to further descend the + * tree before we can conclude the new start isn't causing an overlap + * + * or to b4., which, preceded by a3., means we already traversed one or + * more existing intervals entirely, from the right. * * For a new, rightmost pair of elements, we'll hit cases b3. and b2., * in that order. * * The flag is also cleared in two special cases: * - * b4. |__ _ _!|<_ _ _ (insert start right before existing end) - * b5. |__ _ >|!__ _ _ (insert end right after existing start) + * b5. 
|__ _ _!|<_ _ _ (insert start right before existing end) + * b6. |__ _ >|!__ _ _ (insert end right after existing start) * * which always happen as last step and imply that no further * overlapping is possible. + * + * Another special case comes from the fact that start elements matching + * an already existing start element are allowed: insertion is not + * performed but we return -EEXIST in that case, and the error will be + * cleared by the caller if NLM_F_EXCL is not present in the request. + * This way, request for insertion of an exact overlap isn't reported as + * error to userspace if not desired. + * + * However, if the existing start matches a pre-existing start, but the + * end element doesn't match the corresponding pre-existing end element, + * we need to report a partial overlap. This is a local condition that + * can be noticed without need for a tracking flag, by checking for a + * local duplicated end for a corresponding start, from left and right, + * separately. */ parent = NULL; @@ -272,26 +292,41 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, if (nft_rbtree_interval_start(new)) { if (nft_rbtree_interval_end(rbe) && nft_set_elem_active(&rbe->ext, genmask) && - !nft_set_elem_expired(&rbe->ext)) + !nft_set_elem_expired(&rbe->ext) && !*p) overlap = false; } else { + if (dup_end_left && !*p) + return -ENOTEMPTY; + overlap = nft_rbtree_interval_end(rbe) && nft_set_elem_active(&rbe->ext, genmask) && !nft_set_elem_expired(&rbe->ext); + + if (overlap) { + dup_end_right = true; + continue; + } } } else if (d > 0) { p = &parent->rb_right; if (nft_rbtree_interval_end(new)) { + if (dup_end_right && !*p) + return -ENOTEMPTY; + overlap = nft_rbtree_interval_end(rbe) && nft_set_elem_active(&rbe->ext, genmask) && !nft_set_elem_expired(&rbe->ext); - } else if (nft_rbtree_interval_end(rbe) && - nft_set_elem_active(&rbe->ext, genmask) && + + if (overlap) { + dup_end_left = true; + continue; + } + } else if (nft_set_elem_active(&rbe->ext, genmask) && !nft_set_elem_expired(&rbe->ext)) { - overlap = true; + overlap = nft_rbtree_interval_end(rbe); } } else { if (nft_rbtree_interval_end(rbe) && @@ -316,6 +351,8 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, p = &parent->rb_left; } } + + dup_end_left = dup_end_right = false; } if (overlap) diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 19bef176145eb9561261489d4b4c1b9b688231db..606411869698e00067f99c0ce56a7d3657b05f82 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -640,7 +640,7 @@ static void __net_exit recent_proc_net_exit(struct net *net) struct recent_table *t; /* recent_net_exit() is called before recent_mt_destroy(). Make sure - * that the parent xt_recent proc entry is is empty before trying to + * that the parent xt_recent proc entry is empty before trying to * remove it. 
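The rbtree hunks above extend overlap detection with the dup_end_left/dup_end_right flags so that a start element matching an existing start still yields -EEXIST (cleared by the caller under NLM_F_EXCL semantics), while a matching start whose end differs is a partial overlap and must fail with -ENOTEMPTY. A much-simplified userspace model of that rule on plain intervals — check_insert() is invented for this sketch and skips the tree descent entirely:

/* Userspace model of the partial-overlap rule on flat intervals. */
#include <errno.h>
#include <stdio.h>

struct interval { unsigned int start, end; };

static int check_insert(const struct interval *existing,
			const struct interval *new)
{
	if (existing->start == new->start) {
		if (existing->end == new->end)
			return -EEXIST;		/* exact duplicate */
		return -ENOTEMPTY;		/* same start, different end */
	}
	if (new->start < existing->end && existing->start < new->end)
		return -ENOTEMPTY;		/* plain overlap */
	return 0;
}

int main(void)
{
	struct interval a = { 10, 20 }, b = { 10, 30 };

	printf("%d\n", check_insert(&a, &b) == -ENOTEMPTY);	/* 1 */
	return 0;
}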
*/ spin_lock_bh(&recent_lock); diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index d07de2c0fbc765c01fe4a0bd743664180cd0e9e1..f73a8382c275e0377c2c730c7ed6add3d4c3f93b 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -85,6 +85,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) kfree(netlbl_domhsh_addr6_entry(iter6)); } #endif /* IPv6 */ + kfree(ptr->def.addrsel); } kfree(ptr->domain); kfree(ptr); @@ -537,6 +538,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, goto add_return; } #endif /* IPv6 */ + /* cleanup the new entry since we've moved everything over */ + netlbl_domhsh_free_entry(&entry->rcu); } else ret_val = -EINVAL; @@ -580,6 +583,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, { int ret_val = 0; struct audit_buffer *audit_buf; + struct netlbl_af4list *iter4; + struct netlbl_domaddr4_map *map4; +#if IS_ENABLED(CONFIG_IPV6) + struct netlbl_af6list *iter6; + struct netlbl_domaddr6_map *map6; +#endif /* IPv6 */ if (entry == NULL) return -ENOENT; @@ -597,6 +606,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, ret_val = -ENOENT; spin_unlock(&netlbl_domhsh_lock); + if (ret_val) + return ret_val; + audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, @@ -606,40 +618,29 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, audit_log_end(audit_buf); } - if (ret_val == 0) { - struct netlbl_af4list *iter4; - struct netlbl_domaddr4_map *map4; -#if IS_ENABLED(CONFIG_IPV6) - struct netlbl_af6list *iter6; - struct netlbl_domaddr6_map *map6; -#endif /* IPv6 */ - - switch (entry->def.type) { - case NETLBL_NLTYPE_ADDRSELECT: - netlbl_af4list_foreach_rcu(iter4, - &entry->def.addrsel->list4) { - map4 = netlbl_domhsh_addr4_entry(iter4); - cipso_v4_doi_putdef(map4->def.cipso); - } + switch (entry->def.type) { + case NETLBL_NLTYPE_ADDRSELECT: + netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) { + map4 = netlbl_domhsh_addr4_entry(iter4); + cipso_v4_doi_putdef(map4->def.cipso); + } #if IS_ENABLED(CONFIG_IPV6) - netlbl_af6list_foreach_rcu(iter6, - &entry->def.addrsel->list6) { - map6 = netlbl_domhsh_addr6_entry(iter6); - calipso_doi_putdef(map6->def.calipso); - } + netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) { + map6 = netlbl_domhsh_addr6_entry(iter6); + calipso_doi_putdef(map6->def.calipso); + } #endif /* IPv6 */ - break; - case NETLBL_NLTYPE_CIPSOV4: - cipso_v4_doi_putdef(entry->def.cipso); - break; + break; + case NETLBL_NLTYPE_CIPSOV4: + cipso_v4_doi_putdef(entry->def.cipso); + break; #if IS_ENABLED(CONFIG_IPV6) - case NETLBL_NLTYPE_CALIPSO: - calipso_doi_putdef(entry->def.calipso); - break; + case NETLBL_NLTYPE_CALIPSO: + calipso_doi_putdef(entry->def.calipso); + break; #endif /* IPv6 */ - } - call_rcu(&entry->rcu, netlbl_domhsh_free_entry); } + call_rcu(&entry->rcu, netlbl_domhsh_free_entry); return ret_val; } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index b5f30d7d30d06847a6df54080689779f2675d76d..d2d1448274f5616439e8a378258240865dbb098b 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -353,7 +353,7 @@ static void netlink_rcv_wake(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); - if (skb_queue_empty(&sk->sk_receive_queue)) + if (skb_queue_empty_lockless(&sk->sk_receive_queue)) clear_bit(NETLINK_S_CONGESTED, &nlk->state); if (!test_bit(NETLINK_S_CONGESTED, &nlk->state)) 
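The netlabel hunks above fix two leaks: netlbl_domhsh_free_entry() now also releases the address-selector block, and the add path frees the entry whose address lists were just merged into an existing one. A userspace sketch of the free path — struct dom_map is a cut-down stand-in for netlbl_dom_map, with call_rcu() replaced by a direct call:

/* Userspace sketch of the completed free path. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct addrsel { int lists; };

struct dom_map {
	char *domain;
	struct addrsel *addrsel;	/* set for ADDRSELECT entries only */
};

static void domhsh_free_entry_model(struct dom_map *ptr)
{
	free(ptr->addrsel);		/* the added kfree(): no more leak */
	free(ptr->domain);
	free(ptr);
}

int main(void)
{
	struct dom_map *e = calloc(1, sizeof(*e));

	e->domain = strdup("example");
	e->addrsel = calloc(1, sizeof(*e->addrsel));
	domhsh_free_entry_model(e);	/* releases all three allocations */
	puts("freed");
	return 0;
}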
wake_up_interruptible(&nlk->wait); diff --git a/net/netlink/policy.c b/net/netlink/policy.c index f6491853c797155cb564f3d075eb372d333c4bd9..641ffbdd977abf208c6a834aceb7636335633682 100644 --- a/net/netlink/policy.c +++ b/net/netlink/policy.c @@ -51,6 +51,9 @@ static int add_policy(struct nl_policy_dump **statep, if (!state) return -ENOMEM; + memset(&state->policies[state->n_alloc], 0, + flex_array_size(state, policies, n_alloc - state->n_alloc)); + state->policies[state->n_alloc].policy = policy; state->policies[state->n_alloc].maxtype = maxtype; state->n_alloc = n_alloc; @@ -185,7 +188,7 @@ int netlink_policy_dump_write(struct sk_buff *skb, unsigned long _state) goto next; case NLA_NESTED: type = NL_ATTR_TYPE_NESTED; - /* fall through */ + fallthrough; case NLA_NESTED_ARRAY: if (pt->type == NLA_NESTED_ARRAY) type = NL_ATTR_TYPE_NESTED_ARRAY; diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c index 2bef3779f8935eb09e011e3cbd35b97e31f59954..69e58906c32b1e8308cd423dc59eb57670d14638 100644 --- a/net/netrom/nr_in.c +++ b/net/netrom/nr_in.c @@ -122,7 +122,7 @@ static int nr_state2_machine(struct sock *sk, struct sk_buff *skb, case NR_DISCREQ: nr_write_internal(sk, NR_DISCACK); - /* fall through */ + fallthrough; case NR_DISCACK: nr_disconnect(sk, 0); break; diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index 0891ee02ca4fe03145066852035de38e62a06ee2..78da5eab252a0df1af0b69da54b87b4dc06e98da 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c @@ -263,7 +263,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic, case 3: re_sort_routes(nr_node, 0, 1); re_sort_routes(nr_node, 1, 2); - /* fall through */ + fallthrough; case 2: re_sort_routes(nr_node, 0, 1); case 1: @@ -356,7 +356,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n switch (i) { case 0: nr_node->routes[0] = nr_node->routes[1]; - /* fall through */ + fallthrough; case 1: nr_node->routes[1] = nr_node->routes[2]; case 2: @@ -479,7 +479,7 @@ static int nr_dec_obs(void) switch (i) { case 0: s->routes[0] = s->routes[1]; - /* Fallthrough */ + fallthrough; case 1: s->routes[1] = s->routes[2]; case 2: @@ -526,7 +526,7 @@ void nr_rt_device_down(struct net_device *dev) switch (i) { case 0: t->routes[0] = t->routes[1]; - /* fall through */ + fallthrough; case 1: t->routes[1] = t->routes[2]; case 2: diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 98d393e70de327d95e6c9548c0b2b1f6b6101ed5..a3f1204f1ed27078673b74a4db679ced00366c2d 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -778,7 +778,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, } } /* Non-ICMP, fall thru to initialize if needed. */ - /* fall through */ + fallthrough; case IP_CT_NEW: /* Seen it before? This can happen for loopback, retrans, * or local packets. @@ -1540,7 +1540,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, switch (type) { case OVS_CT_ATTR_FORCE_COMMIT: info->force = true; - /* fall through. 
*/ + fallthrough; case OVS_CT_ATTR_COMMIT: info->commit = true; break; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 03942c30d83e67bc412a4748edd70beedb42dfd3..b03d142ec82efa15ff3cfe7d407989e86b5a7251 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -675,7 +675,7 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key) case -EINVAL: memset(&key->ip, 0, sizeof(key->ip)); memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr)); - /* fall-through */ + fallthrough; case -EPROTO: skb->transport_header = skb->network_header; error = 0; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 479c257ded7335616da35aa5f7880b54aa2f9fe0..2b33e977a905911c30ecc9d1a9920bccad04f23f 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2170,7 +2170,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, int skb_len = skb->len; unsigned int snaplen, res; unsigned long status = TP_STATUS_USER; - unsigned short macoff, netoff, hdrlen; + unsigned short macoff, hdrlen; + unsigned int netoff; struct sk_buff *copy_skb = NULL; struct timespec64 ts; __u32 ts_status; @@ -2239,6 +2240,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, } macoff = netoff - maclen; } + if (netoff > USHRT_MAX) { + atomic_inc(&po->tp_drops); + goto drop_n_restore; + } if (po->tp_version <= TPACKET_V2) { if (macoff + snaplen > po->rx_ring.frame_size) { if (po->copy_thresh && @@ -4061,7 +4066,7 @@ static int packet_notifier(struct notifier_block *this, case NETDEV_UNREGISTER: if (po->mclist) packet_dev_mclist_delete(dev, &po->mclist); - /* fallthrough */ + fallthrough; case NETDEV_DOWN: if (dev->ifindex == po->ifindex) { diff --git a/net/phonet/pep.c b/net/phonet/pep.c index e47d09aca4af465b335edfc91a95e15e10a31fe0..a1525916885ae9741972e32b16363319edc1aa31 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -368,7 +368,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) err = -EINVAL; goto out; } - /* fall through */ + fallthrough; case PNS_PEP_DISABLE_REQ: atomic_set(&pn->tx_credits, 0); pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); @@ -385,7 +385,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) case PNS_PIPE_ALIGNED_DATA: __skb_pull(skb, 1); - /* fall through */ + fallthrough; case PNS_PIPE_DATA: __skb_pull(skb, 3); /* Pipe data header */ if (!pn_flow_safe(pn->rx_fc)) { @@ -417,11 +417,11 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) err = pipe_rcv_created(sk, skb); if (err) break; - /* fall through */ + fallthrough; case PNS_PIPE_RESET_IND: if (!pn->init_enable) break; - /* fall through */ + fallthrough; case PNS_PIPE_ENABLED_IND: if (!pn_flow_safe(pn->tx_fc)) { atomic_set(&pn->tx_credits, 1); @@ -555,7 +555,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) switch (hdr->message_id) { case PNS_PIPE_ALIGNED_DATA: __skb_pull(skb, 1); - /* fall through */ + fallthrough; case PNS_PIPE_DATA: __skb_pull(skb, 3); /* Pipe data header */ if (!pn_flow_safe(pn->rx_fc)) { diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index b4c0db0b7d31ca3303ac99d9e87bdfef7ffdc284..90c558f89d46565ee5d5845d0ca97c095a8287a8 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -692,23 +692,25 @@ static void qrtr_port_remove(struct qrtr_sock *ipc) */ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port) { + u32 min_port; int rc; mutex_lock(&qrtr_port_lock); if (!*port) { - rc = idr_alloc(&qrtr_ports, ipc, - QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1, - 
GFP_ATOMIC); - if (rc >= 0) - *port = rc; + min_port = QRTR_MIN_EPH_SOCKET; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC); + if (!rc) + *port = min_port; } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) { rc = -EACCES; } else if (*port == QRTR_PORT_CTRL) { - rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC); + min_port = 0; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC); } else { - rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC); - if (rc >= 0) - *port = rc; + min_port = *port; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC); + if (!rc) + *port = min_port; } mutex_unlock(&qrtr_port_lock); diff --git a/net/rds/send.c b/net/rds/send.c index 9a529a01cdc6a10a13d08efcd32947bf22290849..985d0b7713acc5f0f3e31c5944348322e3879c5f 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -934,7 +934,7 @@ static int rds_rm_size(struct msghdr *msg, int num_sgs, case RDS_CMSG_ZCOPY_COOKIE: zcopy_cookie = true; - /* fall through */ + fallthrough; case RDS_CMSG_RDMA_DEST: case RDS_CMSG_RDMA_MAP: diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c index 0d4fab2be82bdf90deac540cac8181bb9ee00e85..6af786d66b03a9ae764cd34f296da7d1569948c7 100644 --- a/net/rose/rose_in.c +++ b/net/rose/rose_in.c @@ -216,7 +216,7 @@ static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int framety switch (frametype) { case ROSE_RESET_REQUEST: rose_write_internal(sk, ROSE_RESET_CONFIRMATION); - /* fall through */ + fallthrough; case ROSE_RESET_CONFIRMATION: rose_stop_timer(sk); rose_start_idletimer(sk); diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 5277631fa14c6caa1087bf36282a6bbb7936264d..6e35703ff353d731ea07012963931437cf96a7be 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -343,7 +343,7 @@ static int rose_del_node(struct rose_route_struct *rose_route, case 0: rose_node->neighbour[0] = rose_node->neighbour[1]; - /* fall through */ + fallthrough; case 1: rose_node->neighbour[1] = rose_node->neighbour[2]; @@ -505,7 +505,7 @@ void rose_rt_device_down(struct net_device *dev) switch (i) { case 0: t->neighbour[0] = t->neighbour[1]; - /* fall through */ + fallthrough; case 1: t->neighbour[1] = t->neighbour[2]; case 2: diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index e6725a6de015fb5081fc4c9bb5a62dce70794f56..186c8a889b164018c5ad4a16f82dea756220c04c 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -246,7 +246,7 @@ static int rxrpc_listen(struct socket *sock, int backlog) ret = 0; break; } - /* Fall through */ + fallthrough; default: ret = -EBUSY; break; @@ -545,7 +545,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len) rx->local = local; rx->sk.sk_state = RXRPC_CLIENT_BOUND; - /* Fall through */ + fallthrough; case RXRPC_CLIENT_BOUND: if (!m->msg_name && @@ -553,7 +553,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len) m->msg_name = &rx->connect_srx; m->msg_namelen = sizeof(rx->connect_srx); } - /* Fall through */ + fallthrough; case RXRPC_SERVER_BOUND: case RXRPC_SERVER_LISTENING: ret = rxrpc_do_sendmsg(rx, m, len); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 6d29a3603a3e6e262ca80bf1d137331e1bb5bd7d..884cff7bb169a6add2fce6635aa250d9f6caacf7 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -488,7 +488,6 @@ enum rxrpc_call_flag { RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at 
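The qrtr hunk above moves port allocation from idr_alloc() to idr_alloc_u32(): idr_alloc() returns the new id as an int, so it cannot represent the full u32 port space, whereas idr_alloc_u32() returns 0 or -errno and hands the id back through a pointer. A userspace model of the new calling convention — the allocator is stubbed, and the 0x4000/0x7fff ephemeral bounds are assumptions standing in for QRTR_MIN_EPH_SOCKET/QRTR_MAX_EPH_SOCKET:

/* Userspace model of the idr_alloc_u32() calling convention. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int idr_alloc_u32_stub(uint32_t *id, uint32_t min, uint32_t max)
{
	if (min > max)
		return -ENOSPC;
	*id = min;			/* pretend 'min' was free */
	return 0;			/* success is 0, id via pointer */
}

static int qrtr_port_assign_model(uint32_t *port)
{
	uint32_t min_port;
	int rc;

	if (!*port) {			/* pick from the ephemeral range */
		min_port = 0x4000;
		rc = idr_alloc_u32_stub(&min_port, min_port, 0x7fff);
	} else {			/* claim exactly the requested port */
		min_port = *port;
		rc = idr_alloc_u32_stub(&min_port, min_port, *port);
	}
	if (!rc)
		*port = min_port;
	return rc;
}

int main(void)
{
	uint32_t port = 0;

	printf("%d %u\n", qrtr_port_assign_model(&port), port);	/* 0 16384 */
	return 0;
}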
rxtx_top) */ RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ - RXRPC_CALL_PINGING, /* Ping in process */ RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */ RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */ @@ -673,9 +672,13 @@ struct rxrpc_call { rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */ rxrpc_seq_t ackr_seen; /* Highest packet shown seen */ - /* ping management */ - rxrpc_serial_t ping_serial; /* Last ping sent */ - ktime_t ping_time; /* Time last ping sent */ + /* RTT management */ + rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */ + ktime_t rtt_sent_at[4]; /* Time packet sent */ + unsigned long rtt_avail; /* Mask of available slots in bits 0-3, + * Mask of pending samples in 8-11 */ +#define RXRPC_CALL_RTT_AVAIL_MASK 0xf +#define RXRPC_CALL_RTT_PEND_SHIFT 8 /* transmission-phase ACK management */ ktime_t acks_latest_ts; /* Timestamp of latest ACK received */ @@ -1037,7 +1040,7 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call, /* * rtt.c */ -void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, +void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int, rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool); void rxrpc_peer_init_rtt(struct rxrpc_peer *); diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 032ed76c0166da24f96a9a77fe954b1697aba52b..ef160566aa9a1e8c4664256c82bb6e7c129fb4bf 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -622,7 +622,7 @@ int rxrpc_reject_call(struct rxrpc_sock *rx) case RXRPC_CALL_SERVER_ACCEPTING: __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED); abort = true; - /* fall through */ + fallthrough; case RXRPC_CALL_COMPLETE: ret = call->error; goto out_discard; diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 38a46167523fa435e673483878a4ea145a9c2e8b..a40fae0139423a6672d99a0095bf52d42d2156cb 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -153,6 +153,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp, call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1; call->rxnet = rxnet; + call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK; atomic_inc(&rxnet->nr_calls); return call; diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index f2a1a5dbb5a7bb21d47f33f3e2cf8fd1acd88192..159e3eda7914281de807c362b16aba1ecd78cb1e 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -881,7 +881,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call) conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE; rxrpc_activate_channels_locked(conn); } - /* fall through */ + fallthrough; case RXRPC_CONN_CLIENT_ACTIVE: if (list_empty(&conn->waiting_calls)) { rxrpc_deactivate_one_channel(conn, channel); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 767579328a0697b13e120a886ebddda4ec962c81..667c44aa5a63cb1abd90564ac67dccf094a3c0b2 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -608,36 +608,57 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) } /* - * Process a requested ACK. + * See if there's a cached RTT probe to complete. 
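The ar-internal.h hunk above replaces the single ping_serial/ping_time pair with four RTT probe slots tracked in one rtt_avail word: bits 0-3 say a slot is free, bits 8-11 (offset by RXRPC_CALL_RTT_PEND_SHIFT) say a result is pending. A single-threaded userspace model of that bookkeeping — the kernel pairs each transition with memory barriers that this sketch deliberately omits:

/* Userspace model of the two-bank rtt_avail bitmask. */
#include <stdio.h>

#define RTT_SLOTS	4
#define RTT_AVAIL_MASK	0xfUL
#define RTT_PEND_SHIFT	8

static unsigned long rtt_avail = RTT_AVAIL_MASK;	/* all slots free */

static int claim_slot(void)
{
	for (int i = 0; i < RTT_SLOTS; i++) {
		if (rtt_avail & (1UL << i)) {
			rtt_avail &= ~(1UL << i);		   /* busy */
			rtt_avail |= 1UL << (i + RTT_PEND_SHIFT);  /* armed */
			return i;
		}
	}
	return -1;					/* the no_slot path */
}

static void complete_slot(int i)
{
	rtt_avail &= ~(1UL << (i + RTT_PEND_SHIFT));	/* result consumed */
	rtt_avail |= 1UL << i;				/* slot free again */
}

int main(void)
{
	int s = claim_slot();

	complete_slot(s);
	printf("%d %#lx\n", s, rtt_avail);		/* 0 0xf */
	return 0;
}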
*/ -static void rxrpc_input_requested_ack(struct rxrpc_call *call, - ktime_t resp_time, - rxrpc_serial_t orig_serial, - rxrpc_serial_t ack_serial) +static void rxrpc_complete_rtt_probe(struct rxrpc_call *call, + ktime_t resp_time, + rxrpc_serial_t acked_serial, + rxrpc_serial_t ack_serial, + enum rxrpc_rtt_rx_trace type) { - struct rxrpc_skb_priv *sp; - struct sk_buff *skb; + rxrpc_serial_t orig_serial; + unsigned long avail; ktime_t sent_at; - int ix; + bool matched = false; + int i; - for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) { - skb = call->rxtx_buffer[ix]; - if (!skb) - continue; + avail = READ_ONCE(call->rtt_avail); + smp_rmb(); /* Read avail bits before accessing data. */ - sent_at = skb->tstamp; - smp_rmb(); /* Read timestamp before serial. */ - sp = rxrpc_skb(skb); - if (sp->hdr.serial != orig_serial) + for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) { + if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail)) continue; - goto found; - } - return; + sent_at = call->rtt_sent_at[i]; + orig_serial = call->rtt_serial[i]; + + if (orig_serial == acked_serial) { + clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); + smp_mb(); /* Read data before setting avail bit */ + set_bit(i, &call->rtt_avail); + if (type != rxrpc_rtt_rx_cancel) + rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial, + sent_at, resp_time); + else + trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i, + orig_serial, acked_serial, 0, 0); + matched = true; + } + + /* If a later serial is being acked, then mark this slot as + * being available. + */ + if (after(acked_serial, orig_serial)) { + trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i, + orig_serial, acked_serial, 0, 0); + clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); + smp_wmb(); + set_bit(i, &call->rtt_avail); + } + } -found: - rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack, - orig_serial, ack_serial, sent_at, resp_time); + if (!matched) + trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0); } /* @@ -682,27 +703,11 @@ static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call) */ static void rxrpc_input_ping_response(struct rxrpc_call *call, ktime_t resp_time, - rxrpc_serial_t orig_serial, + rxrpc_serial_t acked_serial, rxrpc_serial_t ack_serial) { - rxrpc_serial_t ping_serial; - ktime_t ping_time; - - ping_time = call->ping_time; - smp_rmb(); - ping_serial = READ_ONCE(call->ping_serial); - - if (orig_serial == call->acks_lost_ping) + if (acked_serial == call->acks_lost_ping) rxrpc_input_check_for_lost_ack(call); - - if (before(orig_serial, ping_serial) || - !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags)) - return; - if (after(orig_serial, ping_serial)) - return; - - rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response, - orig_serial, ack_serial, ping_time, resp_time); } /* @@ -843,7 +848,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) struct rxrpc_ackinfo info; u8 acks[RXRPC_MAXACKS]; } buf; - rxrpc_serial_t acked_serial; + rxrpc_serial_t ack_serial, acked_serial; rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt; int nr_acks, offset, ioffset; @@ -856,6 +861,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) } offset += sizeof(buf.ack); + ack_serial = sp->hdr.serial; acked_serial = ntohl(buf.ack.serial); first_soft_ack = ntohl(buf.ack.firstPacket); prev_pkt = ntohl(buf.ack.previousPacket); @@ -864,31 +870,42 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ? 
buf.ack.reason : RXRPC_ACK__INVALID); - trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial, + trace_rxrpc_rx_ack(call, ack_serial, acked_serial, first_soft_ack, prev_pkt, summary.ack_reason, nr_acks); - if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE) + switch (buf.ack.reason) { + case RXRPC_ACK_PING_RESPONSE: rxrpc_input_ping_response(call, skb->tstamp, acked_serial, - sp->hdr.serial); - if (buf.ack.reason == RXRPC_ACK_REQUESTED) - rxrpc_input_requested_ack(call, skb->tstamp, acked_serial, - sp->hdr.serial); + ack_serial); + rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, + rxrpc_rtt_rx_ping_response); + break; + case RXRPC_ACK_REQUESTED: + rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, + rxrpc_rtt_rx_requested_ack); + break; + default: + if (acked_serial != 0) + rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, + rxrpc_rtt_rx_cancel); + break; + } if (buf.ack.reason == RXRPC_ACK_PING) { - _proto("Rx ACK %%%u PING Request", sp->hdr.serial); + _proto("Rx ACK %%%u PING Request", ack_serial); rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE, - sp->hdr.serial, true, true, + ack_serial, true, true, rxrpc_propose_ack_respond_to_ping); } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) { rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, - sp->hdr.serial, true, true, + ack_serial, true, true, rxrpc_propose_ack_respond_to_ack); } /* Discard any out-of-order or duplicate ACKs (outside lock). */ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { - trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial, + trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, first_soft_ack, call->ackr_first_seq, prev_pkt, call->ackr_prev_seq); return; @@ -904,7 +921,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) /* Discard any out-of-order or duplicate ACKs (inside lock). 
*/ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { - trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial, + trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, first_soft_ack, call->ackr_first_seq, prev_pkt, call->ackr_prev_seq); goto out; @@ -964,7 +981,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) RXRPC_TX_ANNO_LAST && summary.nr_acks == call->tx_top - hard_ack && rxrpc_is_client_call(call)) - rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, + rxrpc_propose_ACK(call, RXRPC_ACK_PING, ack_serial, false, true, rxrpc_propose_ack_ping_for_lost_reply); @@ -1084,7 +1101,7 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx, switch (READ_ONCE(call->state)) { case RXRPC_CALL_SERVER_AWAIT_ACK: rxrpc_call_completed(call); - /* Fall through */ + fallthrough; case RXRPC_CALL_COMPLETE: break; default: @@ -1243,12 +1260,12 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) case RXRPC_PACKET_TYPE_BUSY: if (rxrpc_to_server(sp)) goto discard; - /* Fall through */ + fallthrough; case RXRPC_PACKET_TYPE_ACK: case RXRPC_PACKET_TYPE_ACKALL: if (sp->hdr.callNumber == 0) goto bad_message; - /* Fall through */ + fallthrough; case RXRPC_PACKET_TYPE_ABORT: break; diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index c8b2097f499c022513d879ed9b8e4ecb6e18454b..ede058f9cc15e9c8d08888bb7ca03bb50c3486ba 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -162,7 +162,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) /* Fall through and set IPv4 options too otherwise we don't get * errors from IPv4 packets sent through the IPv6 socket. */ - /* Fall through */ + fallthrough; case AF_INET: /* we want to receive ICMP errors */ ip_sock_set_recverr(local->socket->sk); diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 1ba43c3df4adb227aab69af0a4979e093e087744..3cfff7922ba820219c82029b0e4d824175f178de 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -123,6 +123,49 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, return top - hard_ack + 3; } +/* + * Record the beginning of an RTT probe. + */ +static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial, + enum rxrpc_rtt_tx_trace why) +{ + unsigned long avail = call->rtt_avail; + int rtt_slot = 9; + + if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK)) + goto no_slot; + + rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK); + if (!test_and_clear_bit(rtt_slot, &call->rtt_avail)) + goto no_slot; + + call->rtt_serial[rtt_slot] = serial; + call->rtt_sent_at[rtt_slot] = ktime_get_real(); + smp_wmb(); /* Write data before avail bit */ + set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); + + trace_rxrpc_rtt_tx(call, why, rtt_slot, serial); + return rtt_slot; + +no_slot: + trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial); + return -1; +} + +/* + * Cancel an RTT probe. + */ +static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call, + rxrpc_serial_t serial, int rtt_slot) +{ + if (rtt_slot != -1) { + clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); + smp_wmb(); /* Clear pending bit before setting slot */ + set_bit(rtt_slot, &call->rtt_avail); + trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial); + } +} + /* * Send an ACK call packet. 
*/ @@ -136,7 +179,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, rxrpc_serial_t serial; rxrpc_seq_t hard_ack, top; size_t len, n; - int ret; + int ret, rtt_slot = -1; u8 reason; if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) @@ -196,18 +239,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, if (_serial) *_serial = serial; - if (ping) { - call->ping_serial = serial; - smp_wmb(); - /* We need to stick a time in before we send the packet in case - * the reply gets back before kernel_sendmsg() completes - but - * asking UDP to send the packet can take a relatively long - * time. - */ - call->ping_time = ktime_get_real(); - set_bit(RXRPC_CALL_PINGING, &call->flags); - trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial); - } + if (ping) + rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping); ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); conn->params.peer->last_tx_at = ktime_get_seconds(); @@ -221,8 +254,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, if (call->state < RXRPC_CALL_COMPLETE) { if (ret < 0) { - if (ping) - clear_bit(RXRPC_CALL_PINGING, &call->flags); + rxrpc_cancel_rtt_probe(call, serial, rtt_slot); rxrpc_propose_ACK(call, pkt->ack.reason, ntohl(pkt->ack.serial), false, true, @@ -321,7 +353,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, struct kvec iov[2]; rxrpc_serial_t serial; size_t len; - int ret; + int ret, rtt_slot = -1; _enter(",{%d}", skb->len); @@ -397,6 +429,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, sp->hdr.serial = serial; smp_wmb(); /* Set serial before timestamp */ skb->tstamp = ktime_get_real(); + if (whdr.flags & RXRPC_REQUEST_ACK) + rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data); /* send the packet by UDP * - returns -EMSGSIZE if UDP would have to fragment the packet @@ -408,12 +442,15 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, conn->params.peer->last_tx_at = ktime_get_seconds(); up_read(&conn->params.local->defrag_sem); - if (ret < 0) + if (ret < 0) { + rxrpc_cancel_rtt_probe(call, serial, rtt_slot); trace_rxrpc_tx_fail(call->debug_id, serial, ret, rxrpc_tx_point_call_data_nofrag); - else + } else { trace_rxrpc_tx_packet(call->debug_id, &whdr, rxrpc_tx_point_call_data_nofrag); + } + rxrpc_tx_backoff(call, ret); if (ret == -EMSGSIZE) goto send_fragmentable; @@ -422,7 +459,6 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, if (ret >= 0) { if (whdr.flags & RXRPC_REQUEST_ACK) { call->peer->rtt_last_req = skb->tstamp; - trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial); if (call->peer->rtt_count > 1) { unsigned long nowj = jiffies, ack_lost_at; @@ -469,6 +505,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, sp->hdr.serial = serial; smp_wmb(); /* Set serial before timestamp */ skb->tstamp = ktime_get_real(); + if (whdr.flags & RXRPC_REQUEST_ACK) + rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data); switch (conn->params.local->srx.transport.family) { case AF_INET6: @@ -487,12 +525,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, BUG(); } - if (ret < 0) + if (ret < 0) { + rxrpc_cancel_rtt_probe(call, serial, rtt_slot); trace_rxrpc_tx_fail(call->debug_id, serial, ret, rxrpc_tx_point_call_data_frag); - else + } else { trace_rxrpc_tx_packet(call->debug_id, &whdr, rxrpc_tx_point_call_data_frag); + } rxrpc_tx_backoff(call, ret); 
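The output.c hunks above arm an RTT probe only when the outgoing packet carries RXRPC_REQUEST_ACK (or is a ping) and tear it down again if kernel_sendmsg() fails, so a transmit error can no longer leak a slot. A userspace sketch of that pairing — both stubs are stand-ins for rxrpc_begin_rtt_probe() and rxrpc_cancel_rtt_probe():

/* Userspace sketch: arm on REQUEST_ACK, cancel on send failure. */
#include <stdio.h>

static int begin_rtt_probe_stub(void)
{
	return 0;			/* claimed slot 0 */
}

static void cancel_rtt_probe_stub(int rtt_slot)
{
	if (rtt_slot != -1) {
		/* return the slot to the free bank, as the hunk does */
	}
}

static int send_data_packet_model(int request_ack, int send_err)
{
	int rtt_slot = -1;

	if (request_ack)
		rtt_slot = begin_rtt_probe_stub();

	if (send_err < 0) {
		cancel_rtt_probe_stub(rtt_slot);	/* no leaked slot */
		return send_err;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", send_data_packet_model(1, -11));	/* -11, probe undone */
	return 0;
}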
up_write(&conn->params.local->defrag_sem); diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index a852f46d5234d78e7c2e45e3da3965c58f3adc40..be032850ae8ca042cd3b4ab262e6fc25928fe5a0 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -273,7 +273,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer, case SO_EE_ORIGIN_ICMP6: if (err == EACCES) err = EHOSTUNREACH; - /* Fall through */ + fallthrough; default: _proto("Rx Received error report { orig=%u }", ee->ee_origin); break; diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index ca29976bb193efd37fb9bc7f83cbdd4aa51f1737..68396d05205252177ea9525b43972f1598480e12 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c @@ -502,11 +502,21 @@ EXPORT_SYMBOL(rxrpc_kernel_get_peer); * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT * @sock: The socket on which the call is in progress. * @call: The call to query + * @_srtt: Where to store the SRTT value. * - * Get the call's peer smoothed RTT. + * Get the call's peer smoothed RTT in uS. */ -u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call) +bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call, + u32 *_srtt) { - return call->peer->srtt_us >> 3; + struct rxrpc_peer *peer = call->peer; + + if (peer->rtt_count == 0) { + *_srtt = 1000000; /* 1S */ + return false; + } + + *_srtt = call->peer->srtt_us >> 3; + return true; } EXPORT_SYMBOL(rxrpc_kernel_get_srtt); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index efecc5a8f67d74a4f1c2ee15d8e72fb3dbea9f41..c4684dde1f160b394ae783c758c8a573e8b3979b 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -776,7 +776,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call, case RXRPC_ACK_DELAY: if (ret != -EAGAIN) break; - /* Fall through */ + fallthrough; default: rxrpc_send_ack_packet(call, false, NULL); } diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c index 928d8b34a3eeee8685edc2f617a47036b9fac0ba..1221b0637a7ecce9f2e7eda92394abe700e2ec9d 100644 --- a/net/rxrpc/rtt.c +++ b/net/rxrpc/rtt.c @@ -146,6 +146,7 @@ static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us) * exclusive access to the peer RTT data. 
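The peer_object.c hunk above changes rxrpc_kernel_get_srtt() to report whether any RTT sample exists, handing back a conservative one-second default otherwise, so callers such as AFS can fall back to a fixed timeout rather than trust an uninitialised SRTT. A userspace model of the caller-visible contract — struct peer_model mirrors only the two fields the getter reads:

/* Userspace model of the bool-returning SRTT getter. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct peer_model {
	unsigned int rtt_count;		/* number of samples collected */
	uint32_t srtt_us_scaled;	/* SRTT in us, stored <<3 like TCP */
};

static bool kernel_get_srtt_model(const struct peer_model *peer,
				  uint32_t *_srtt)
{
	if (peer->rtt_count == 0) {
		*_srtt = 1000000;	/* 1 s default, in microseconds */
		return false;		/* no real samples yet */
	}
	*_srtt = peer->srtt_us_scaled >> 3;
	return true;
}

int main(void)
{
	struct peer_model p = { .rtt_count = 0 };
	uint32_t srtt;

	printf("%d %u\n", kernel_get_srtt_model(&p, &srtt));	/* 0 */
	printf("%u\n", srtt);					/* 1000000 */
	return 0;
}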
*/ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, + int rtt_slot, rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, ktime_t send_time, ktime_t resp_time) { @@ -162,7 +163,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, peer->rtt_count++; spin_unlock(&peer->rtt_input_lock); - trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, + trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial, peer->srtt_us >> 3, peer->rto_j); } diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 52a24d4ef5d8a841a6e0f1ad58c14873ae03e666..e08130e5746b552ee1b0b948da510af6260587ff 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -1137,7 +1137,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, ret = -ENOMEM; ticket = kmalloc(ticket_len, GFP_NOFS); if (!ticket) - goto temporary_error; + goto temporary_error_free_resp; eproto = tracepoint_string("rxkad_tkt_short"); abort_code = RXKADPACKETSHORT; @@ -1230,6 +1230,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn, temporary_error_free_ticket: kfree(ticket); +temporary_error_free_resp: kfree(response); temporary_error: /* Ignore the response packet if we got a temporary error such as diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index f3f6da6e4ad29cabd9c767ce63334555f8d952ac..0824e103d037d4bde1547ac62a21840c1e0a36eb 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -241,7 +241,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now); if (!last) break; - /* Fall through */ + fallthrough; case RXRPC_CALL_SERVER_SEND_REPLY: call->state = RXRPC_CALL_SERVER_AWAIT_ACK; rxrpc_notify_end_tx(rx, call, notify_end_tx); @@ -721,13 +721,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) if (p.call.timeouts.normal > 0 && j == 0) j = 1; WRITE_ONCE(call->next_rx_timo, j); - /* Fall through */ + fallthrough; case 2: j = msecs_to_jiffies(p.call.timeouts.idle); if (p.call.timeouts.idle > 0 && j == 0) j = 1; WRITE_ONCE(call->next_req_timo, j); - /* Fall through */ + fallthrough; case 1: if (p.call.timeouts.hard > 0) { j = msecs_to_jiffies(p.call.timeouts.hard); diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index e6ad42b11835f8d35d2c96604c910985a5f44325..2c3619165680fd5abfcd73b353f8dd3f675fc0c3 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -704,7 +704,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, err = ip_defrag(net, skb, user); local_bh_enable(); if (err && err != -EINPROGRESS) - goto out_free; + return err; if (!err) { *defrag = true; diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 0618b63f87c410422ea78733898033caa32272d4..7d37638ee1c7a9dd0706b27a36fbe58cbf234010 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1670,7 +1670,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return 0; } diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index deac82f3ad7b860b7494901e5084863a3cc13e0c..e89fab6ccb34f733a474111a08128076b8c77198 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -353,23 +353,11 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt, FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP, tb[TCA_RED_EARLY_DROP_BLOCK], extack); if (err) - goto err_early_drop_init; - - 
err = tcf_qevent_init(&q->qe_mark, sch, - FLOW_BLOCK_BINDER_TYPE_RED_MARK, - tb[TCA_RED_MARK_BLOCK], extack); - if (err) - goto err_mark_init; - - return 0; + return err; -err_mark_init: - tcf_qevent_destroy(&q->qe_early_drop, sch); -err_early_drop_init: - del_timer_sync(&q->adapt_timer); - red_offload(sch, false); - qdisc_put(q->qdisc); - return err; + return tcf_qevent_init(&q->qe_mark, sch, + FLOW_BLOCK_BINDER_TYPE_RED_MARK, + tb[TCA_RED_MARK_BLOCK], extack); } static int red_change(struct Qdisc *sch, struct nlattr *opt, diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index e981992634ddfe9373fe19d4915615bf5c334691..fe53c1e38c7d61b9b85217dce45724539a1f3559 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1176,9 +1176,27 @@ static void taprio_offload_config_changed(struct taprio_sched *q) spin_unlock(&q->current_entry_lock); } -static void taprio_sched_to_offload(struct taprio_sched *q, +static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask) +{ + u32 i, queue_mask = 0; + + for (i = 0; i < dev->num_tc; i++) { + u32 offset, count; + + if (!(tc_mask & BIT(i))) + continue; + + offset = dev->tc_to_txq[i].offset; + count = dev->tc_to_txq[i].count; + + queue_mask |= GENMASK(offset + count - 1, offset); + } + + return queue_mask; +} + +static void taprio_sched_to_offload(struct net_device *dev, struct sched_gate_list *sched, - const struct tc_mqprio_qopt *mqprio, struct tc_taprio_qopt_offload *offload) { struct sched_entry *entry; @@ -1193,7 +1211,8 @@ static void taprio_sched_to_offload(struct taprio_sched *q, e->command = entry->command; e->interval = entry->interval; - e->gate_mask = entry->gate_mask; + e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask); + i++; } @@ -1201,7 +1220,6 @@ static void taprio_sched_to_offload(struct taprio_sched *q, } static int taprio_enable_offload(struct net_device *dev, - struct tc_mqprio_qopt *mqprio, struct taprio_sched *q, struct sched_gate_list *sched, struct netlink_ext_ack *extack) @@ -1223,7 +1241,7 @@ static int taprio_enable_offload(struct net_device *dev, return -ENOMEM; } offload->enable = 1; - taprio_sched_to_offload(q, sched, mqprio, offload); + taprio_sched_to_offload(dev, sched, offload); err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); if (err < 0) { @@ -1485,7 +1503,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, } if (FULL_OFFLOAD_IS_ENABLED(q->flags)) - err = taprio_enable_offload(dev, mqprio, q, new_admin, extack); + err = taprio_enable_offload(dev, q, new_admin, extack); else err = taprio_disable_offload(dev, q, extack); if (err) diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index aea2a982984d0283de6bbdfbd3eb7a817cc61e2f..8a58f42d6d1954996b8b8c1ac6925b57100e792b 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -875,7 +875,7 @@ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp) case AF_INET: if (!__ipv6_only_sock(sctp_opt2sk(sp))) return 1; - /* fallthru */ + fallthrough; default: return 0; } diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 577e3bc4ee6f1691cfa93733fbecec23baf4c78e..3fd06a27105ddbfdaf9fc288558d57e471572341 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -912,7 +912,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx) case SCTP_CID_ABORT: if (sctp_test_T_bit(chunk)) ctx->packet->vtag = ctx->asoc->c.my_vtag; - /* fallthru */ + fallthrough; /* The following chunks are "response" chunks, i.e. 
* they are generated in response to something we @@ -927,7 +927,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx) case SCTP_CID_ECN_CWR: case SCTP_CID_ASCONF_ACK: one_packet = 1; - /* Fall through */ + fallthrough; case SCTP_CID_SACK: case SCTP_CID_HEARTBEAT: @@ -1030,7 +1030,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx, if (!ctx->packet || !ctx->packet->has_cookie_echo) return; - /* fall through */ + fallthrough; case SCTP_STATE_ESTABLISHED: case SCTP_STATE_SHUTDOWN_PENDING: case SCTP_STATE_SHUTDOWN_RECEIVED: diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 47910470e53283d9a9143120049c43b0b7db1894..c11c24524652c51c78b0ae292d85758bc865d4de 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2077,7 +2077,7 @@ static enum sctp_ierror sctp_process_unk_param( break; case SCTP_PARAM_ACTION_DISCARD_ERR: retval = SCTP_IERROR_ERROR; - /* Fall through */ + fallthrough; case SCTP_PARAM_ACTION_SKIP_ERR: /* Make an ERROR chunk, preparing enough room for * returning multiple unknown parameters. diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 9f36fe911d082e9fa3e3de7096d56323a93d4ba6..aa821e71f05e77532cd42237233820099dc2b80d 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1516,7 +1516,7 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, if (timer_pending(timer)) break; - /* fall through */ + fallthrough; case SCTP_CMD_TIMER_START: timer = &asoc->timers[cmd->obj.to]; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index e86620fbd90fd0ecc80b499c6ae058aa4476cc99..c669f8bd1eab2f629bd4bf26a041921d2dcabb44 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -4315,7 +4315,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net, sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(err_chunk)); } - /* Fall Through */ + fallthrough; case SCTP_IERROR_AUTH_BAD_KEYID: case SCTP_IERROR_BAD_SIG: return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ec1fba1fbe717ab64a37e8b7cb06e8d0a2186b4e..836615f71a7d1dc6964f01beeadb0ad1384a176b 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -8060,8 +8060,6 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr) pr_debug("%s: begins, snum:%d\n", __func__, snum); - local_bh_disable(); - if (snum == 0) { /* Search for an available port. */ int low, high, remaining, index; @@ -8079,20 +8077,21 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr) continue; index = sctp_phashfn(net, rover); head = &sctp_port_hashtable[index]; - spin_lock(&head->lock); + spin_lock_bh(&head->lock); sctp_for_each_hentry(pp, &head->chain) if ((pp->port == rover) && net_eq(net, pp->net)) goto next; break; next: - spin_unlock(&head->lock); + spin_unlock_bh(&head->lock); + cond_resched(); } while (--remaining > 0); /* Exhausted local port range during search? */ ret = 1; if (remaining <= 0) - goto fail; + return ret; /* OK, here is the one we will use. HEAD (the port * hash table list entry) is non-NULL and we hold it's @@ -8107,7 +8106,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr) * port iterator, pp being NULL. 
*/ head = &sctp_port_hashtable[sctp_phashfn(net, snum)]; - spin_lock(&head->lock); + spin_lock_bh(&head->lock); sctp_for_each_hentry(pp, &head->chain) { if ((pp->port == snum) && net_eq(pp->net, net)) goto pp_found; @@ -8207,10 +8206,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr) ret = 0; fail_unlock: - spin_unlock(&head->lock); - -fail: - local_bh_enable(); + spin_unlock_bh(&head->lock); return ret; } diff --git a/net/sctp/stream.c b/net/sctp/stream.c index bda2536dd740f7efe7b19dfc17cfc939bae38f1f..6dc95dcc0ff4f065014dbc376aa01b86eb9b2db0 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -88,12 +88,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, int ret; if (outcnt <= stream->outcnt) - return 0; + goto out; ret = genradix_prealloc(&stream->out, outcnt, gfp); if (ret) return ret; +out: stream->outcnt = outcnt; return 0; } @@ -104,12 +105,13 @@ static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt, int ret; if (incnt <= stream->incnt) - return 0; + goto out; ret = genradix_prealloc(&stream->in, incnt, gfp); if (ret) return ret; +out: stream->incnt = incnt; return 0; } diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index 290270c821ca47765dc82bd1909810795ad21804..0e7409e469c02df8ba02ef90d9cfa5595c244240 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c @@ -116,7 +116,6 @@ static void smc_close_cancel_work(struct smc_sock *smc) cancel_work_sync(&smc->conn.close_work); cancel_delayed_work_sync(&smc->conn.tx_work); lock_sock(sk); - sk->sk_state = SMC_CLOSED; } /* terminate smc socket abnormally - active abort @@ -134,22 +133,22 @@ void smc_close_active_abort(struct smc_sock *smc) } switch (sk->sk_state) { case SMC_ACTIVE: - sk->sk_state = SMC_PEERABORTWAIT; - smc_close_cancel_work(smc); - sk->sk_state = SMC_CLOSED; - sock_put(sk); /* passive closing */ - break; case SMC_APPCLOSEWAIT1: case SMC_APPCLOSEWAIT2: + sk->sk_state = SMC_PEERABORTWAIT; smc_close_cancel_work(smc); + if (sk->sk_state != SMC_PEERABORTWAIT) + break; sk->sk_state = SMC_CLOSED; - sock_put(sk); /* postponed passive closing */ + sock_put(sk); /* (postponed) passive closing */ break; case SMC_PEERCLOSEWAIT1: case SMC_PEERCLOSEWAIT2: case SMC_PEERFINCLOSEWAIT: sk->sk_state = SMC_PEERABORTWAIT; smc_close_cancel_work(smc); + if (sk->sk_state != SMC_PEERABORTWAIT) + break; sk->sk_state = SMC_CLOSED; smc_conn_free(&smc->conn); release_clcsock = true; @@ -159,6 +158,8 @@ void smc_close_active_abort(struct smc_sock *smc) case SMC_APPFINCLOSEWAIT: sk->sk_state = SMC_PEERABORTWAIT; smc_close_cancel_work(smc); + if (sk->sk_state != SMC_PEERABORTWAIT) + break; sk->sk_state = SMC_CLOSED; smc_conn_free(&smc->conn); release_clcsock = true; @@ -372,7 +373,7 @@ static void smc_close_passive_work(struct work_struct *work) case SMC_PEERCLOSEWAIT1: if (rxflags->peer_done_writing) sk->sk_state = SMC_PEERCLOSEWAIT2; - /* fall through */ + fallthrough; /* to check for closing */ case SMC_PEERCLOSEWAIT2: if (!smc_cdc_rxed_any_close(conn)) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index b42fa3b00d00ae3507efe577fe1cfd5a6abf522b..a406627b1d552949f39611d02aee96da243862ac 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -1356,6 +1356,8 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini) if (ini->is_smcd) { conn->rx_off = sizeof(struct smcd_cdc_msg); smcd_cdc_rx_init(conn); /* init tasklet for this conn */ + } else { + conn->rx_off = 0; } #ifndef KERNEL_HAS_ATOMIC64 spin_lock_init(&conn->acurs_lock); @@ -1777,6 
+1779,7 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd) list_del(&smc->conn.sndbuf_desc->list); mutex_unlock(&smc->conn.lgr->sndbufs_lock); smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc); + smc->conn.sndbuf_desc = NULL; } return rc; } diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index e1f64f4ba2361432a58ce1d19ec0ecd41462971e..da9ba6d1679b7804f72ff3b3b8e222e1d3dfc833 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -170,13 +170,15 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) && !list_empty(&smc->conn.lgr->list)) { struct smc_connection *conn = &smc->conn; - struct smcd_diag_dmbinfo dinfo = { - .linkid = *((u32 *)conn->lgr->id), - .peer_gid = conn->lgr->peer_gid, - .my_gid = conn->lgr->smcd->local_gid, - .token = conn->rmb_desc->token, - .peer_token = conn->peer_token - }; + struct smcd_diag_dmbinfo dinfo; + + memset(&dinfo, 0, sizeof(dinfo)); + + dinfo.linkid = *((u32 *)conn->lgr->id); + dinfo.peer_gid = conn->lgr->peer_gid; + dinfo.my_gid = conn->lgr->smcd->local_gid; + dinfo.token = conn->rmb_desc->token; + dinfo.peer_token = conn->peer_token; if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0) goto errout; diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index df5b0a6ea8488ffb53fb5a4aa5b564d557ee4400..3ea33466ebe98c7d39afd442ed37bcaa0fab5b8a 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -841,6 +841,9 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry) struct smc_init_info ini; int lnk_idx, rc = 0; + if (!llc->qp_mtu) + goto out_reject; + ini.vlan_id = lgr->vlan_id; smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev); if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) && @@ -917,10 +920,20 @@ static void smc_llc_cli_add_link_invite(struct smc_link *link, kfree(qentry); } +static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++) + if (llc->raw.data[i]) + return false; + return true; +} + static bool smc_llc_is_local_add_link(union smc_llc_msg *llc) { if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK && - !llc->add_link.qp_mtu && !llc->add_link.link_num) + smc_llc_is_empty_llc_message(llc)) return true; return false; } diff --git a/net/socket.c b/net/socket.c index dbbe8ea7d395dadf690a4c2dfdf60ed40c49df6d..0c0144604f818a77fed154aafa5c504e9a99e1f9 100644 --- a/net/socket.c +++ b/net/socket.c @@ -3610,7 +3610,7 @@ int kernel_getsockname(struct socket *sock, struct sockaddr *addr) EXPORT_SYMBOL(kernel_getsockname); /** - * kernel_peername - get the address which the socket is connected (kernel space) + * kernel_getpeername - get the address which the socket is connected (kernel space) * @sock: socket * @addr: address holder * @@ -3671,7 +3671,7 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, EXPORT_SYMBOL(kernel_sendpage_locked); /** - * kernel_shutdown - shut down part of a full-duplex connection (kernel space) + * kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space) * @sock: socket * @how: connection part * diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 90b8329fef8242c0d2538a36669c3460e8906309..8b300b74a722178bc0807400e3a807d1fd8078d7 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -137,7 +137,7 @@ gss_krb5_make_confounder(char *p, u32 conflen) switch (conflen) { case 16: *q++ = i++; - /* fall through */ + 
fallthrough; case 8: *q++ = i++; break; diff --git a/net/sunrpc/auth_gss/trace.c b/net/sunrpc/auth_gss/trace.c index d26036a5744320c430b5c3882cf61d5801eddacf..76685abba60fec016a92efe6b08503b92067ae4f 100644 --- a/net/sunrpc/auth_gss/trace.c +++ b/net/sunrpc/auth_gss/trace.c @@ -9,7 +9,6 @@ #include #include #include -#include #define CREATE_TRACE_POINTS #include diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index a91d1cdad9d7e02a3d4ebeda496fc80669beae27..62e0b6c1e8cf9d47bfa609727599c93bf604bea3 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1702,7 +1702,7 @@ call_reserveresult(struct rpc_task *task) switch (status) { case -ENOMEM: rpc_delay(task, HZ >> 2); - /* fall through */ + fallthrough; case -EAGAIN: /* woken up; retry */ task->tk_action = call_retry_reserve; return; @@ -1759,13 +1759,13 @@ call_refreshresult(struct rpc_task *task) /* Use rate-limiting and a max number of retries if refresh * had status 0 but failed to update the cred. */ - /* fall through */ + fallthrough; case -ETIMEDOUT: rpc_delay(task, 3*HZ); - /* fall through */ + fallthrough; case -EAGAIN: status = -EACCES; - /* fall through */ + fallthrough; case -EKEYEXPIRED: if (!task->tk_cred_retry) break; @@ -2132,7 +2132,7 @@ call_connect_status(struct rpc_task *task) rpc_force_rebind(clnt); goto out_retry; } - /* fall through */ + fallthrough; case -ECONNRESET: case -ECONNABORTED: case -ENETDOWN: @@ -2146,7 +2146,7 @@ call_connect_status(struct rpc_task *task) break; /* retry with existing socket, after a delay */ rpc_delay(task, 3*HZ); - /* fall through */ + fallthrough; case -EADDRINUSE: case -ENOTCONN: case -EAGAIN: @@ -2228,7 +2228,7 @@ call_transmit_status(struct rpc_task *task) */ case -ENOBUFS: rpc_delay(task, HZ>>2); - /* fall through */ + fallthrough; case -EBADSLT: case -EAGAIN: task->tk_action = call_transmit; @@ -2247,7 +2247,7 @@ call_transmit_status(struct rpc_task *task) rpc_call_rpcerror(task, task->tk_status); return; } - /* fall through */ + fallthrough; case -ECONNRESET: case -ECONNABORTED: case -EADDRINUSE: @@ -2313,7 +2313,7 @@ call_bc_transmit_status(struct rpc_task *task) break; case -ENOBUFS: rpc_delay(task, HZ>>2); - /* fall through */ + fallthrough; case -EBADSLT: case -EAGAIN: task->tk_status = 0; @@ -2380,7 +2380,7 @@ call_status(struct rpc_task *task) * were a timeout. 
*/ rpc_delay(task, 3*HZ); - /* fall through */ + fallthrough; case -ETIMEDOUT: break; case -ECONNREFUSED: @@ -2391,7 +2391,7 @@ call_status(struct rpc_task *task) break; case -EADDRINUSE: rpc_delay(task, 3*HZ); - /* fall through */ + fallthrough; case -EPIPE: case -EAGAIN: break; diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index c27123e6ba80c1f166c1212aedf6f6385c6b1950..4a67685c83eb4f6d8d1b32f3fe1a2d85617a9ae5 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -982,8 +982,8 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr, p = xdr_inline_decode(xdr, len); if (unlikely(p == NULL)) goto out_fail; - dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid, - req->rq_task->tk_msg.rpc_proc->p_name, (char *)p); + dprintk("RPC: %5u RPCB_%s reply: %*pE\n", req->rq_task->tk_pid, + req->rq_task->tk_msg.rpc_proc->p_name, len, (char *)p); if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len, sap, sizeof(address)) == 0) diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 6ba9d58426291c52f8dea999edc62d49427bc2b8..5a8e47bbfb9f45a925a6eb2fd891fb917a1e09a4 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1623,7 +1623,7 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) case -EAGAIN: xprt_add_backlog(xprt, task); dprintk("RPC: waiting for request slot\n"); - /* fall through */ + fallthrough; default: task->tk_status = -EAGAIN; } diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 75c646743df3ef7ccc1f348eb8914a5f4e330427..ad6e2e4994ce8edd52b920976a27f3bc8ffc0d2c 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -268,7 +268,7 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) case RDMA_CM_EVENT_DEVICE_REMOVAL: pr_info("rpcrdma: removing device %s for %pISpc\n", ep->re_id->device->name, sap); - /* fall through */ + fallthrough; case RDMA_CM_EVENT_ADDR_CHANGE: ep->re_connect_status = -ENODEV; goto disconnected; @@ -933,6 +933,8 @@ static void rpcrdma_req_reset(struct rpcrdma_req *req) rpcrdma_regbuf_dma_unmap(req->rl_sendbuf); rpcrdma_regbuf_dma_unmap(req->rl_recvbuf); + + frwr_reset(req); } /* ASSUMPTION: the rb_allreqs list is stable for the duration, diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c57aef82940356112094802190adf5e8fe32fc21..554e1bb4c1c7e4cf0c63f2265c5214d07a071c09 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -885,7 +885,7 @@ static int xs_local_send_request(struct rpc_rqst *req) default: dprintk("RPC: sendmsg returned unrecognized error %d\n", -status); - /* fall through */ + fallthrough; case -EPIPE: xs_close(xprt); status = -ENOTCONN; @@ -1436,7 +1436,7 @@ static void xs_tcp_state_change(struct sock *sk) xprt->connect_cookie++; clear_bit(XPRT_CONNECTED, &xprt->state); xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT); - /* fall through */ + fallthrough; case TCP_CLOSING: /* * If the server closed down the connection, make sure that @@ -2202,7 +2202,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) switch (ret) { case 0: xs_set_srcport(transport, sock); - /* fall through */ + fallthrough; case -EINPROGRESS: /* SYN_SENT! */ if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) @@ -2255,7 +2255,7 @@ static void xs_tcp_setup_socket(struct work_struct *work) default: printk("%s: connect returned unhandled error %d\n", __func__, status); - /* fall through */ + fallthrough; case -EADDRNOTAVAIL: /* We're probably in TIME_WAIT. 
Get rid of existing socket, * and retry diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig index 9dd780215eef4a2e361f61cb8de945388cc1bd9d..be1c4003d67d286955c31d0aa1ffb4fc27981b02 100644 --- a/net/tipc/Kconfig +++ b/net/tipc/Kconfig @@ -6,6 +6,7 @@ menuconfig TIPC tristate "The TIPC Protocol" depends on INET + depends on IPV6 || IPV6=n help The Transparent Inter Process Communication (TIPC) protocol is specially designed for intra cluster communication. This protocol diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 808b147df7d527bd2da5a9e7c4fdadc5404794aa..6504141104521c744fc81ec658ab9e5eeb361b5b 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -652,7 +652,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, test_and_set_bit_lock(0, &b->up); break; } - /* fall through */ + fallthrough; case NETDEV_GOING_DOWN: clear_bit_unlock(0, &b->up); tipc_reset_bearer(net, b); diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index 001bcb0f2480dca13c4a1cdf84192d9b0f920451..7c523dc81575e18974969d633dd8782c0d91a47d 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -326,7 +326,8 @@ static void tipc_aead_free(struct rcu_head *rp) if (aead->cloned) { tipc_aead_put(aead->cloned); } else { - head = *this_cpu_ptr(aead->tfm_entry); + head = *get_cpu_ptr(aead->tfm_entry); + put_cpu_ptr(aead->tfm_entry); list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) { crypto_free_aead(tfm_entry->tfm); list_del(&tfm_entry->list); @@ -399,10 +400,15 @@ static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val) */ static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead) { - struct tipc_tfm **tfm_entry = this_cpu_ptr(aead->tfm_entry); + struct tipc_tfm **tfm_entry; + struct crypto_aead *tfm; + tfm_entry = get_cpu_ptr(aead->tfm_entry); *tfm_entry = list_next_entry(*tfm_entry, list); - return (*tfm_entry)->tfm; + tfm = (*tfm_entry)->tfm; + put_cpu_ptr(tfm_entry); + + return tfm; } /** @@ -757,10 +763,12 @@ static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err) switch (err) { case 0: this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]); + rcu_read_lock(); if (likely(test_bit(0, &b->up))) b->media->send_msg(net, skb, b, &tx_ctx->dst); else kfree_skb(skb); + rcu_read_unlock(); break; case -EINPROGRESS: return; diff --git a/net/tipc/group.c b/net/tipc/group.c index 89257e2a980de39c480278300e89161ae3458766..588c2d2b0c697f67021337ed701aaa8a0a93b1fb 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -536,7 +536,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq, update = true; deliver = false; } - /* Fall thru */ + fallthrough; case TIPC_GRP_BCAST_MSG: m->bc_rcv_nxt++; ack = msg_grp_bc_ack_req(hdr); diff --git a/net/tipc/link.c b/net/tipc/link.c index 1075781229739d2ce1cfd65bb2af6987efd6d603..b7362556da95a583edfa451a110a0bf218ae74f1 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1239,7 +1239,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, skb_queue_tail(mc_inputq, skb); return true; } - /* fall through */ + fallthrough; case CONN_MANAGER: skb_queue_tail(inputq, skb); return true; diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 217516357ef26223a309be055cb8f6bbc7a78007..90e3c70a91ad0757ad643a17f50b7e767c5a498b 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -275,8 +275,9 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, 
struct tipc_nl_compat_msg *msg) { - int err; + struct nlmsghdr *nlh; struct sk_buff *arg; + int err; if (msg->req_type && (!msg->req_size || !TLV_CHECK_TYPE(msg->req, msg->req_type))) @@ -305,6 +306,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, return -ENOMEM; } + nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI); + if (!nlh) { + kfree_skb(arg); + kfree_skb(msg->rep); + msg->rep = NULL; + return -EMSGSIZE; + } + nlmsg_end(arg, nlh); + err = __tipc_nl_compat_dumpit(cmd, msg, arg); if (err) { kfree_skb(msg->rep); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 07419f36116a8496cc366092177712f2ff941b83..ebd280e767bde87d5603207d03c0c3c32ff925a4 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -783,7 +783,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock, case TIPC_ESTABLISHED: if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) revents |= EPOLLOUT; - /* fall through */ + fallthrough; case TIPC_LISTEN: case TIPC_CONNECTING: if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) @@ -2597,7 +2597,7 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest, * case is EINPROGRESS, rather than EALREADY. */ res = -EINPROGRESS; - /* fall through */ + fallthrough; case TIPC_CONNECTING: if (!timeout) { if (previous == TIPC_CONNECTING) @@ -2771,18 +2771,21 @@ static int tipc_shutdown(struct socket *sock, int how) trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " "); __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); - sk->sk_shutdown = SEND_SHUTDOWN; + if (tipc_sk_type_connectionless(sk)) + sk->sk_shutdown = SHUTDOWN_MASK; + else + sk->sk_shutdown = SEND_SHUTDOWN; if (sk->sk_state == TIPC_DISCONNECTING) { /* Discard any unreceived messages */ __skb_queue_purge(&sk->sk_receive_queue); - /* Wake up anyone sleeping in poll */ - sk->sk_state_change(sk); res = 0; } else { res = -ENOTCONN; } + /* Wake up anyone sleeping in poll. */ + sk->sk_state_change(sk); release_sock(sk); return res; diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 53f0de0676b7a5fb3a620f34d22e71caeee68dbd..911d13cd2e675a0f6f1a6c7c6a341703b6b318af 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -660,6 +660,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, struct udp_tunnel_sock_cfg tuncfg = {NULL}; struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; u8 node_id[NODE_ID_LEN] = {0,}; + struct net_device *dev; int rmcast = 0; ub = kzalloc(sizeof(*ub), GFP_ATOMIC); @@ -714,8 +715,6 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, rcu_assign_pointer(ub->bearer, b); tipc_udp_media_addr_set(&b->addr, &local); if (local.proto == htons(ETH_P_IP)) { - struct net_device *dev; - dev = __ip_dev_find(net, local.ipv4.s_addr, false); if (!dev) { err = -ENODEV; @@ -738,9 +737,8 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, b->mtu = b->media->mtu; #if IS_ENABLED(CONFIG_IPV6) } else if (local.proto == htons(ETH_P_IPV6)) { - struct net_device *dev; - - dev = ipv6_dev_find(net, &local.ipv6); + dev = ub->ifindex ? 
__dev_get_by_index(net, ub->ifindex) : NULL; + dev = ipv6_dev_find(net, &local.ipv6, dev); if (!dev) { err = -ENODEV; goto err; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 181ea6fb56a617e722d9bf617c127fbc3ef73ddd..92784e51ee7d978a6c5b1ccd93814756189be028 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -837,7 +837,7 @@ static int unix_create(struct net *net, struct socket *sock, int protocol, */ case SOCK_RAW: sock->type = SOCK_DGRAM; - /* fall through */ + fallthrough; case SOCK_DGRAM: sock->ops = &unix_dgram_ops; break; diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 90f0f82cd9ca76722e529573b4fa0ff9da2055ff..6a6f2f214c10f11b4d6341ed6e533f7c846695d3 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -10,6 +10,7 @@ */ #include <linux/export.h> +#include <linux/bitfield.h> #include <net/cfg80211.h> #include "core.h" #include "rdev-ops.h" @@ -912,6 +913,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, struct ieee80211_sta_vht_cap *vht_cap; struct ieee80211_edmg *edmg_cap; u32 width, control_freq, cap; + bool support_80_80 = false; if (WARN_ON(!cfg80211_chandef_valid(chandef))) return false; @@ -957,7 +959,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, if (!ht_cap->ht_supported && chandef->chan->band != NL80211_BAND_6GHZ) return false; - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_20_NOHT: prohibited_flags |= IEEE80211_CHAN_NO_20MHZ; width = 20; @@ -979,11 +981,15 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, return false; break; case NL80211_CHAN_WIDTH_80P80: - cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; - if (chandef->chan->band != NL80211_BAND_6GHZ && - cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) + cap = vht_cap->cap; + support_80_80 = + (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) || + (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ && + cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) || + u32_get_bits(cap, IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) > 1; + if (chandef->chan->band != NL80211_BAND_6GHZ && !support_80_80) return false; - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_80: prohibited_flags |= IEEE80211_CHAN_NO_80MHZ; width = 80; @@ -1001,7 +1007,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, return false; cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ && - cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) + cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ && + !(vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)) return false; break; default: diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index a6c61a2e6569195088509b102156184b0dd092bb..db7333e20dd7172a98b7942c1ba715d0818d0b6f 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -941,7 +941,7 @@ void cfg80211_cac_event(struct net_device *netdev, sizeof(struct cfg80211_chan_def)); queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk); cfg80211_sched_dfs_chan_update(rdev); - /* fall through */ + fallthrough; case NL80211_RADAR_CAC_ABORTED: wdev->cac_started = false; break; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c04fc6cf658389bd62379be21c40edb32a72e7b8..2c9e9a2d1688731fa322191ab35a0a17b17f0471 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2107,7 +2107,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 1: if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES, sizeof(u32) * rdev->wiphy.n_cipher_suites, @@ 
-2154,7 +2154,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 2: if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES, rdev->wiphy.interface_modes)) @@ -2162,7 +2162,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 3: nl_bands = nla_nest_start_noflag(msg, NL80211_ATTR_WIPHY_BANDS); @@ -2189,7 +2189,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->chan_start++; if (state->split) break; - /* fall through */ + fallthrough; default: /* add frequencies */ nl_freqs = nla_nest_start_noflag(msg, @@ -2244,7 +2244,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 4: nl_cmds = nla_nest_start_noflag(msg, NL80211_ATTR_SUPPORTED_COMMANDS); @@ -2273,7 +2273,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 5: if (rdev->ops->remain_on_channel && (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) && @@ -2291,7 +2291,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 6: #ifdef CONFIG_PM if (nl80211_send_wowlan(msg, rdev, state->split)) @@ -2302,7 +2302,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, #else state->split_start++; #endif - /* fall through */ + fallthrough; case 7: if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES, rdev->wiphy.software_iftypes)) @@ -2315,7 +2315,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, state->split_start++; if (state->split) break; - /* fall through */ + fallthrough; case 8: if ((rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) && nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME, @@ -5207,7 +5207,7 @@ bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, int attr) break; default: WARN_ON(1); - /* fall through */ + fallthrough; case RATE_INFO_BW_20: rate_flg = 0; break; @@ -6011,7 +6011,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]) params.he_6ghz_capa = - nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]); if (info->attrs[NL80211_ATTR_AIRTIME_WEIGHT]) params.airtime_weight = diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 35b8847a2f6d01f9ac046c038bb93ba830fe7ca1..d8a90d39742357ac8ea48f4e934185591f7310b0 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2946,6 +2946,9 @@ int regulatory_hint_user(const char *alpha2, if (WARN_ON(!alpha2)) return -EINVAL; + if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2)) + return -EINVAL; + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index e67a74488bbe0c554be16b7b8ddbe9e5d8abf13d..04f2d198c2154c20189aa8d83485df7d18fa6ab8 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1433,7 +1433,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, switch (ftype) { case CFG80211_BSS_FTYPE_BEACON: ies->from_beacon = true; - /* fall through */ + fallthrough; case CFG80211_BSS_FTYPE_UNKNOWN: 
rcu_assign_pointer(tmp.pub.beacon_ies, ies); break; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 985f3c23f0545fe97f5e9ac1f6b7c057031dfa66..079ce320dc1ed7223a3fd2533f24da6d544b9e19 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -205,7 +205,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev, return err; case CFG80211_CONN_ASSOC_FAILED_TIMEOUT: *treason = NL80211_TIMEOUT_ASSOC; - /* fall through */ + fallthrough; case CFG80211_CONN_ASSOC_FAILED: cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, @@ -215,7 +215,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev, cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); - /* fall through */ + fallthrough; case CFG80211_CONN_ABANDON: /* free directly, disconnected event already sent */ cfg80211_sme_free(wdev); diff --git a/net/wireless/util.c b/net/wireless/util.c index dfad1c0f57adb2eb707f8398de3efcf0cfc6d819..4a9ff9ef513f920d8ec11ffdc02a44920be24478 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -123,11 +123,13 @@ int ieee80211_freq_khz_to_channel(u32 freq) return (freq - 2407) / 5; else if (freq >= 4910 && freq <= 4980) return (freq - 4000) / 5; - else if (freq < 5945) + else if (freq < 5925) return (freq - 5000) / 5; + else if (freq == 5935) + return 2; else if (freq <= 45000) /* DMG band lower limit */ - /* see 802.11ax D4.1 27.3.22.2 */ - return (freq - 5940) / 5; + /* see 802.11ax D6.1 27.3.22.2 */ + return (freq - 5950) / 5; else if (freq >= 58320 && freq <= 70200) return (freq - 56160) / 2160; else @@ -198,7 +200,7 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband) sband->bitrates[i].flags |= IEEE80211_RATE_MANDATORY_G; want--; - /* fall through */ + fallthrough; default: sband->bitrates[i].flags |= IEEE80211_RATE_ERP_G; @@ -1008,7 +1010,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, case NL80211_IFTYPE_STATION: if (dev->ieee80211_ptr->use_4addr) break; - /* fall through */ + fallthrough; case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_ADHOC: diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index aa918d7ff6bd0c55fb4a523b78b9bafc43204458..4d2160c989a3b755e165fd4019924d432846222a 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -1334,7 +1334,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) wstats.qual.qual = sig + 110; break; } - /* fall through */ + fallthrough; case CFG80211_SIGNAL_TYPE_UNSPEC: if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) { wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED; @@ -1343,7 +1343,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) wstats.qual.qual = sinfo.signal; break; } - /* fall through */ + fallthrough; default: wstats.qual.updated |= IW_QUAL_LEVEL_INVALID; wstats.qual.updated |= IW_QUAL_QUAL_INVALID; diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 7fb327632272bb49feac1bbc5b1124460a8ae65a..8e1a49b0c0dc5c58d0a69ffd25525f8d7b07211e 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -98,7 +98,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, *vc_fac_mask |= X25_MASK_REVERSE; break; } - /*fall through */ + fallthrough; case X25_FAC_THROUGHPUT: facilities->throughput = p[1]; *vc_fac_mask |= X25_MASK_THROUGHPUT; diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 
4d3bb46aaae0d9b0d5e4c17168b6886b48677d41..e1c4197af468e9284cae2a95f790875746a7f9ac 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -349,7 +349,7 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp case X25_RESET_REQUEST: x25_write_internal(sk, X25_RESET_CONFIRMATION); - /* fall through */ + fallthrough; case X25_RESET_CONFIRMATION: { x25_stop_timer(sk); x25->condition = 0x00; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index d5280fd6f9c127b6099f07108a74f76005923e98..d622c2548d2295c0893b1b5fcbd84862f95fb8ea 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -3410,7 +3410,7 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse) switch (nexthdr) { case NEXTHDR_FRAGMENT: onlyproto = 1; - /* fall through */ + fallthrough; case NEXTHDR_ROUTING: case NEXTHDR_HOP: case NEXTHDR_DEST: diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c index 7d71537776785c9537d1ae66ea09a1d2a1e15432..4b22ace52f8051be9a86ba5ad4413aa7718c056d 100644 --- a/samples/bpf/hbm.c +++ b/samples/bpf/hbm.c @@ -483,7 +483,7 @@ int main(int argc, char **argv) "Option -%c requires an argument.\n\n", optopt); case 'h': - // fallthrough + fallthrough; default: Usage(); return 0; diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 62c275685b75e487a8646d8f7554122d1171009b..95e4cdb94fe9f7bfc99f49646c447df98caf23b8 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -66,7 +66,6 @@ KBUILD_CFLAGS += -Wnested-externs KBUILD_CFLAGS += -Wshadow KBUILD_CFLAGS += $(call cc-option, -Wlogical-op) KBUILD_CFLAGS += -Wmissing-field-initializers -KBUILD_CFLAGS += -Wsign-compare KBUILD_CFLAGS += -Wtype-limits KBUILD_CFLAGS += $(call cc-option, -Wmaybe-uninitialized) KBUILD_CFLAGS += $(call cc-option, -Wunused-macros) @@ -87,6 +86,7 @@ KBUILD_CFLAGS += -Wpacked KBUILD_CFLAGS += -Wpadded KBUILD_CFLAGS += -Wpointer-arith KBUILD_CFLAGS += -Wredundant-decls +KBUILD_CFLAGS += -Wsign-compare KBUILD_CFLAGS += -Wswitch-default KBUILD_CFLAGS += $(call cc-option, -Wpacked-bitfield-compat) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 60d4a79674b6efd5f82d5f5ac052e9c3bdba8796..504d2e431c60412dd3720ddfe4df9a0c6ae2e338 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2639,8 +2639,8 @@ sub process { # Check if the commit log has what seems like a diff which can confuse patch if ($in_commit_log && !$commit_log_has_diff && - (($line =~ m@^\s+diff\b.*a/[\w/]+@ && - $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) || + (($line =~ m@^\s+diff\b.*a/([\w/]+)@ && + $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) || $line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ || $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) { ERROR("DIFF_IN_COMMIT_MSG", diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c index b071bf476fea7ede6aab95730f5844ac1bf5caa5..3bc48c726c41c93b1ee713f1e7549bc5dd842b71 100644 --- a/scripts/extract-cert.c +++ b/scripts/extract-cert.c @@ -71,7 +71,7 @@ static void drain_openssl_errors(void) static const char *key_pass; static BIO *wb; static char *cert_dst; -int kbuild_verbose; +static int kbuild_verbose; static void write_cert(X509 *x509) { diff --git a/scripts/genksyms/keywords.c b/scripts/genksyms/keywords.c index 7a85c4e21175094b7fb7ca9b7aa3c2b93bc73e7d..057c6cabad1d9b8ac78b93b1b0514545a2f26a70 100644 --- a/scripts/genksyms/keywords.c +++ b/scripts/genksyms/keywords.c @@ -25,9 +25,9 @@ static struct resword { { "__int128_t", BUILTIN_INT_KEYW }, { "__uint128_t", BUILTIN_INT_KEYW }, - // According 
to rth, c99 defines "_Bool", __restrict", __restrict__", "restrict". KAO + // According to rth, c99 defines "_Bool", "__restrict", "__restrict__", "restrict". KAO { "_Bool", BOOL_KEYW }, - { "_restrict", RESTRICT_KEYW }, + { "__restrict", RESTRICT_KEYW }, { "__restrict__", RESTRICT_KEYW }, { "restrict", RESTRICT_KEYW }, { "asm", ASM_KEYW },
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c index daf1c1506ec4cc95fece4e87f0bd799b4b9c7bef..e0f9655291665409507d668fdae57e71cb6968c3 100644 --- a/scripts/kconfig/nconf.c +++ b/scripts/kconfig/nconf.c @@ -755,7 +755,6 @@ static void build_conf(struct menu *menu) switch (ptype) { case P_MENU: child_count++; - prompt = prompt; if (single_menu_mode) { item_make(menu, 'm', "%s%*c%s",
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc index bc390df49f1fcf59f3641ec7ee0e519a6340ec00..8638785328a7fe082223a1205bff52f00098444e 100644 --- a/scripts/kconfig/qconf.cc +++ b/scripts/kconfig/qconf.cc @@ -885,7 +885,7 @@ void ConfigList::contextMenuEvent(QContextMenuEvent *e) connect(action, SIGNAL(toggled(bool)), parent(), SLOT(setShowName(bool))); connect(parent(), SIGNAL(showNameChanged(bool)), - action, SLOT(setOn(bool))); + action, SLOT(setChecked(bool))); action->setChecked(showName); headerPopup->addAction(action); @@ -894,7 +894,7 @@ void ConfigList::contextMenuEvent(QContextMenuEvent *e) connect(action, SIGNAL(toggled(bool)), parent(), SLOT(setShowRange(bool))); connect(parent(), SIGNAL(showRangeChanged(bool)), - action, SLOT(setOn(bool))); + action, SLOT(setChecked(bool))); action->setChecked(showRange); headerPopup->addAction(action); @@ -903,7 +903,7 @@ connect(action, SIGNAL(toggled(bool)), parent(), SLOT(setShowData(bool))); connect(parent(), SIGNAL(showDataChanged(bool)), - action, SLOT(setOn(bool))); + action, SLOT(setChecked(bool))); action->setChecked(showData); headerPopup->addAction(action); } @@ -1012,6 +1012,16 @@ ConfigInfoView::ConfigInfoView(QWidget* parent, const char *name) configSettings->endGroup(); connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings())); } + + contextMenu = createStandardContextMenu(); + QAction *action = new QAction("Show Debug Info", contextMenu); + + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); + connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setChecked(bool))); + action->setChecked(showDebug()); + contextMenu->addSeparator(); + contextMenu->addAction(action); } void ConfigInfoView::saveSettings(void) @@ -1066,80 +1076,80 @@ void ConfigInfoView::symbolInfo(void) void ConfigInfoView::menuInfo(void) { struct symbol* sym; - QString head, debug, help; + QString info; + QTextStream stream(&info); sym = _menu->sym; if (sym) { if (_menu->prompt) { - head += "<big><b>"; - head += print_filter(_menu->prompt->text); - head += "</b></big>"; + stream << "<big><b>"; + stream << print_filter(_menu->prompt->text); + stream << "</b></big>"; if (sym->name) { - head += " ("; + stream << " ("; if (showDebug()) - head += QString().sprintf("<a href=\"s%s\">", sym->name); - head += print_filter(sym->name); + stream << "<a href=\"s" << sym->name << "\">"; + stream << print_filter(sym->name); if (showDebug()) - head += "</a>"; - head += ")"; + stream << "</a>"; + stream << ")"; } } else if (sym->name) { - head += "<big><b>"; + stream << "<big><b>"; if (showDebug()) - head += QString().sprintf("<a href=\"s%s\">", sym->name); - head += print_filter(sym->name); + stream << "<a href=\"s" << sym->name << "\">"; + stream << print_filter(sym->name); if (showDebug()) - head += "</a>"; - head += "</b></big>"; + stream << "</a>"; + stream << "</b></big>"; } - head += "<br><br>"; + stream << "<br><br>"; if (showDebug()) - debug = debug_info(sym); + stream << debug_info(sym); - struct gstr help_gstr = str_new(); - menu_get_ext_help(_menu, &help_gstr); - help = print_filter(str_get(&help_gstr)); - str_free(&help_gstr); } else if (_menu->prompt) { - head += "<big><b>"; - head += print_filter(_menu->prompt->text); - head += "</b></big><br><br>"; + stream << "<big><b>"; + stream << print_filter(_menu->prompt->text); + stream << "</b></big><br><br>"; if (showDebug()) { if (_menu->prompt->visible.expr) { - debug += "&nbsp;&nbsp;dep: "; - expr_print(_menu->prompt->visible.expr, expr_print_help, &debug, E_NONE); - debug += "<br><br>"; + stream << "&nbsp;&nbsp;dep: "; + expr_print(_menu->prompt->visible.expr, + expr_print_help, &stream, E_NONE); + stream << "<br><br>"; } } } if (showDebug()) - debug += QString().sprintf("defined at %s:%d<br><br>", _menu->file->name, _menu->lineno); + stream << "defined at " << _menu->file->name << ":" + << _menu->lineno << "<br><br>"; - setText(head + debug + help); + setText(info); } QString ConfigInfoView::debug_info(struct symbol *sym) { QString debug; + QTextStream stream(&debug); - debug += "type: "; - debug += print_filter(sym_type_name(sym->type)); + stream << "type: "; + stream << print_filter(sym_type_name(sym->type)); if (sym_is_choice(sym)) - debug += " (choice)"; + stream << " (choice)"; debug += "<br>"; if (sym->rev_dep.expr) { - debug += "reverse dep: "; - expr_print(sym->rev_dep.expr, expr_print_help, &debug, E_NONE); - debug += "<br>"; + stream << "reverse dep: "; + expr_print(sym->rev_dep.expr, expr_print_help, &stream, E_NONE); + stream << "<br>"; } for (struct property *prop = sym->prop; prop; prop = prop->next) { switch (prop->type) { case P_PROMPT: case P_MENU: - debug += QString().sprintf("prompt: <a href=\"m%s\">", sym->name); - debug += print_filter(prop->text); - debug += "</a><br>"; + stream << "prompt: <a href=\"m" << sym->name << "\">"; + stream << print_filter(prop->text); + stream << "</a><br>"; break; case P_DEFAULT: case P_SELECT: @@ -1147,30 +1157,33 @@ QString ConfigInfoView::debug_info(struct symbol *sym) case P_COMMENT: case P_IMPLY: case P_SYMBOL: - debug += prop_get_type_name(prop->type); - debug += ": "; - expr_print(prop->expr, expr_print_help, &debug, E_NONE); - debug += "<br>"; + stream << prop_get_type_name(prop->type); + stream << ": "; + expr_print(prop->expr, expr_print_help, + &stream, E_NONE); + stream << "<br>"; break; case P_CHOICE: if (sym_is_choice(sym)) { - debug += "choice: "; - expr_print(prop->expr, expr_print_help, &debug, E_NONE); - debug += "<br>"; + stream << "choice: "; + expr_print(prop->expr, expr_print_help, + &stream, E_NONE); + stream << "<br>"; } break; default: - debug += "unknown property: "; - debug += prop_get_type_name(prop->type); - debug += "<br>"; + stream << "unknown property: "; + stream << prop_get_type_name(prop->type); + stream << "<br>"; } if (prop->visible.expr) { - debug += "&nbsp;&nbsp;&nbsp;&nbsp;dep: "; - expr_print(prop->visible.expr, expr_print_help, &debug, E_NONE); - debug += "<br>"; + stream << "&nbsp;&nbsp;&nbsp;&nbsp;dep: "; + expr_print(prop->visible.expr, expr_print_help, + &stream, E_NONE); + stream << "<br>"; } } - debug += "<br>"; + stream << "<br>"; return debug; } @@ -1208,15 +1221,15 @@ QString ConfigInfoView::print_filter(const QString &str) void ConfigInfoView::expr_print_help(void *data, struct symbol *sym, const char *str) { - QString* text = reinterpret_cast<QString*>(data); - QString str2 = print_filter(str); + QTextStream *stream = reinterpret_cast<QTextStream *>(data); if (sym && sym->name && !(sym->flags & SYMBOL_CONST)) { - *text += QString().sprintf("<a href=\"s%s\">", sym->name); - *text += str2; - *text += "</a>"; - } else - *text += str2; + *stream << "<a href=\"s" << sym->name << "\">"; + *stream << print_filter(str); + *stream << "</a>"; + } else { + *stream << print_filter(str); + } } void ConfigInfoView::clicked(const QUrl &url) { @@ -1228,7 +1241,6 @@ void ConfigInfoView::clicked(const QUrl &url) struct menu *m = NULL; if (count < 1) { - qInfo() << "Clicked link is empty"; delete[] data; return; } @@ -1241,7 +1253,6 @@ void ConfigInfoView::clicked(const QUrl &url) strcat(data, "$"); result = sym_re_search(data); if (!result) { - qInfo() << "Clicked symbol is invalid:" << data; delete[] data; return; } @@ -1268,23 +1279,10 @@ void ConfigInfoView::clicked(const QUrl &url) delete data; } -QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos) -{ - QMenu* popup = Parent::createStandardContextMenu(pos); - QAction* action = new QAction("Show Debug Info", popup); - - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); - connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool))); - action->setChecked(showDebug()); - popup->addSeparator(); - popup->addAction(action); - return popup; -} - -void ConfigInfoView::contextMenuEvent(QContextMenuEvent *e) +void ConfigInfoView::contextMenuEvent(QContextMenuEvent *event) { - Parent::contextMenuEvent(e); + contextMenu->popup(event->globalPos()); + event->accept(); } ConfigSearchWindow::ConfigSearchWindow(ConfigMainWindow *parent)
diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h index 461df6419f15c715fb89e74bf9137eb808ff41ec..f97376a8123f7b953672832796d89539e2b802a7 100644 --- a/scripts/kconfig/qconf.h +++ b/scripts/kconfig/qconf.h @@ -30,7 +30,7 @@ class ConfigSettings : public QSettings { }; enum colIdx { - promptColIdx, nameColIdx, noColIdx, modColIdx, yesColIdx, dataColIdx, colNr + promptColIdx, nameColIdx, noColIdx, modColIdx, yesColIdx, dataColIdx }; enum listMode { singleMode, menuMode, symbolMode, fullMode, listMode @@ -215,6 +215,7 @@ public slots: class ConfigInfoView : public QTextBrowser { Q_OBJECT typedef class QTextBrowser Parent; + QMenu *contextMenu; public: ConfigInfoView(QWidget* parent, const char *name = 0); bool showDebug(void) const { return _showDebug; } @@ -235,8 +236,7 @@ public slots: QString debug_info(struct symbol *sym); static QString print_filter(const QString &str); static void expr_print_help(void *data, struct symbol *sym, const char *str); - QMenu *createStandardContextMenu(const QPoint & pos); - void contextMenuEvent(QContextMenuEvent *e); + void contextMenuEvent(QContextMenuEvent *event); struct symbol *sym; struct menu *_menu;
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl index 19857d18d814db7bd47118b7b967ad4a69810554..1c78ba49ca99232eda46e121f05dba6832c8237f 100755 --- a/scripts/kconfig/streamline_config.pl +++ b/scripts/kconfig/streamline_config.pl @@ -593,7 +593,10 @@ while ($repeat) { } my %setconfigs; -my @preserved_kconfigs = split(/:/,$ENV{LMC_KEEP}); +my @preserved_kconfigs; +if (defined($ENV{'LMC_KEEP'})) { + @preserved_kconfigs = split(/:/,$ENV{LMC_KEEP}); +} sub 
in_preserved_kconfigs { my $kconfig = $config2kfile{$_[0]}; diff --git a/scripts/tags.sh b/scripts/tags.sh index 32d3f53af10ba85a3f8d9a2c649d1c29a143f483..850f4ccb6afcf1615ffc88ad0776817afb2a6bcb 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh @@ -26,7 +26,11 @@ else fi # ignore userspace tools -ignore="$ignore ( -path ${tree}tools ) -prune -o" +if [ -n "$COMPILED_SOURCE" ]; then + ignore="$ignore ( -path ./tools ) -prune -o" +else + ignore="$ignore ( -path ${tree}tools ) -prune -o" +fi # Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH if [ "${ALLSOURCE_ARCHS}" = "" ]; then @@ -92,7 +96,7 @@ all_sources() all_compiled_sources() { realpath -es $([ -z "$KBUILD_ABS_SRCTREE" ] && echo --relative-to=.) \ - include/generated/autoconf.h $(find -name "*.cmd" -exec \ + include/generated/autoconf.h $(find $ignore -name "*.cmd" -exec \ grep -Poh '(?(?=^source_.* \K).*|(?=^ \K\S).*(?= \\))' {} \+ | awk '!a[$0]++') | sort -u } diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 7b0e13ce7dc7b188f3d9f6917853bbaa78a3c643..f919ebd042fd2571d0fe9707346c71a76c5193cf 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -577,7 +577,7 @@ static struct aa_label *x_to_label(struct aa_profile *profile, stack = NULL; break; } - /* fall through - to X_NAME */ + fallthrough; /* to X_NAME */ case AA_X_NAME: if (xindex & AA_X_CHILD) /* released by caller */ diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 30c246a9d4409f7ef90d5e67f2ec94e6d08c766d..fa49b81eb54caf9332728260844422433b6b85d0 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c @@ -292,13 +292,13 @@ void aa_apply_modes_to_perms(struct aa_profile *profile, struct aa_perms *perms) switch (AUDIT_MODE(profile)) { case AUDIT_ALL: perms->audit = ALL_PERMS_MASK; - /* fall through */ + fallthrough; case AUDIT_NOQUIET: perms->quiet = 0; break; case AUDIT_QUIET: perms->audit = 0; - /* fall through */ + fallthrough; case AUDIT_QUIET_DENIED: perms->quiet = ALL_PERMS_MASK; break; diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 372d163829606d0a16b675c88c13cde551f40b01..b8848f53c8cc23b01e380b9d31c7197e5d2ba9a7 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -223,7 +223,7 @@ static int xattr_verify(enum ima_hooks func, struct integrity_iint_cache *iint, case IMA_XATTR_DIGEST_NG: /* first byte contains algorithm id */ hash_start = 1; - /* fall through */ + fallthrough; case IMA_XATTR_DIGEST: if (iint->flags & IMA_DIGSIG_REQUIRED) { *cause = "IMA-signature-required"; @@ -395,7 +395,7 @@ int ima_appraise_measurement(enum ima_hooks func, /* It's fine not to have xattrs when using a modsig. */ if (try_modsig) break; - /* fall through */ + fallthrough; case INTEGRITY_NOLABEL: /* No security.evm xattr. 
*/ cause = "missing-HMAC"; goto out; diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 07f033634b27ad30dd835b0c1b4abb1f7e80ae91..b4de33074b37da402e4547aa6dd02bcb86a1edfe 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -1279,12 +1279,12 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) case Opt_uid_gt: case Opt_euid_gt: entry->uid_op = &uid_gt; - /* fall through */ + fallthrough; case Opt_uid_lt: case Opt_euid_lt: if ((token == Opt_uid_lt) || (token == Opt_euid_lt)) entry->uid_op = &uid_lt; - /* fall through */ + fallthrough; case Opt_uid_eq: case Opt_euid_eq: uid_token = (token == Opt_uid_eq) || @@ -1313,11 +1313,11 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) break; case Opt_fowner_gt: entry->fowner_op = &uid_gt; - /* fall through */ + fallthrough; case Opt_fowner_lt: if (token == Opt_fowner_lt) entry->fowner_op = &uid_lt; - /* fall through */ + fallthrough; case Opt_fowner_eq: ima_log_string_op(ab, "fowner", args[0].from, entry->fowner_op); diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c index 41a5f435b793fe2c655f982ae99fa4346db94999..c022ee9e2a4e626fe11b63d2b55503295f088fd9 100644 --- a/security/integrity/ima/ima_template_lib.c +++ b/security/integrity/ima/ima_template_lib.c @@ -77,7 +77,7 @@ static void ima_show_template_data_ascii(struct seq_file *m, /* skip ':' and '\0' */ buf_ptr += 2; buflen -= buf_ptr - field_data->data; - /* fall through */ + fallthrough; case DATA_FMT_DIGEST: case DATA_FMT_HEX: if (!buflen) diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 7e0232db1707e5bed552094d03228d60a5e8658d..1fe8b934f656f9b966ae0584436070fea7f911d9 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -465,7 +465,7 @@ key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx) case -EAGAIN: /* no key */ if (ret) break; - /* fall through */ + fallthrough; case -ENOKEY: /* negative key */ ret = key_ref; break; @@ -487,7 +487,7 @@ key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx) case -EAGAIN: /* no key */ if (ret) break; - /* fall through */ + fallthrough; case -ENOKEY: /* negative key */ ret = key_ref; break; @@ -509,7 +509,7 @@ key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx) case -EAGAIN: /* no key */ if (ret) break; - /* fall through */ + fallthrough; case -ENOKEY: /* negative key */ ret = key_ref; break; diff --git a/security/keys/request_key.c b/security/keys/request_key.c index e1b9f1a80676e5276c6aad7bb6ccda847a66cedb..2da4404276f0f5a68b0619dd6aa1d06a0fdb7198 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -295,26 +295,26 @@ static int construct_get_dest_keyring(struct key **_dest_keyring) } } - /* fall through */ + fallthrough; case KEY_REQKEY_DEFL_THREAD_KEYRING: dest_keyring = key_get(cred->thread_keyring); if (dest_keyring) break; - /* fall through */ + fallthrough; case KEY_REQKEY_DEFL_PROCESS_KEYRING: dest_keyring = key_get(cred->process_keyring); if (dest_keyring) break; - /* fall through */ + fallthrough; case KEY_REQKEY_DEFL_SESSION_KEYRING: dest_keyring = key_get(cred->session_keyring); if (dest_keyring) break; - /* fall through */ + fallthrough; case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: ret = look_up_user_keyrings(NULL, &dest_keyring); if (ret < 0) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 
ca901025802a9e0aa5d5709f90bda8ee283aaa02..a340986aa92e1d9e2434f2aaf5c208930a8156cb 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3606,26 +3606,20 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case FIONREAD: - /* fall through */ case FIBMAP: - /* fall through */ case FIGETBSZ: - /* fall through */ case FS_IOC_GETFLAGS: - /* fall through */ case FS_IOC_GETVERSION: error = file_has_perm(cred, file, FILE__GETATTR); break; case FS_IOC_SETFLAGS: - /* fall through */ case FS_IOC_SETVERSION: error = file_has_perm(cred, file, FILE__SETATTR); break; /* sys_ioctl() checks */ case FIONBIO: - /* fall through */ case FIOASYNC: error = file_has_perm(cred, file, 0); break; @@ -3783,7 +3777,7 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd, err = file_has_perm(cred, file, FILE__WRITE); break; } - /* fall through */ + fallthrough; case F_SETOWN: case F_SETSIG: case F_GETFL: diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c index 408d306895f8fc5e285ff6a58ddffb2a5ed63bfe..d338962fb0c48427d80088579978289aa5618aa7 100644 --- a/security/selinux/ss/mls.c +++ b/security/selinux/ss/mls.c @@ -535,7 +535,7 @@ int mls_compute_sid(struct policydb *p, scontext, tcontext); } - /* Fallthrough */ + fallthrough; case AVTAB_CHANGE: if ((tclass == p->process_class) || sock) /* Use the process MLS attributes. */ @@ -546,8 +546,6 @@ int mls_compute_sid(struct policydb *p, case AVTAB_MEMBER: /* Use the process effective MLS attributes. */ return mls_context_cpy_low(newcontext, scontext); - - /* fall through */ } return -EINVAL; } diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 8ffbf951b7ed78b482b5f0131297011634f2cd79..8c0893eb5aa8f0a3ca08a6a10832fda6218da117 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -3365,7 +3365,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) * to set mount options simulate setting the * superblock default. */ - /* Fall through */ + fallthrough; default: /* * This isn't an understood special case. 
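The security/* hunks above are part of the treewide switch from /* fall through */ comments to the fallthrough pseudo-keyword, which GCC and Clang can actually verify under -Wimplicit-fallthrough; comments sitting between immediately adjacent case labels (as in selinux_file_ioctl()) or after an unconditional return (as in mls_compute_sid()) are simply deleted, since there is no statement to fall through from. A minimal sketch of the pattern, using a hypothetical apply_audit_mode() helper rather than code from the tree:

/*
 * In the kernel, fallthrough comes from include/linux/compiler_attributes.h:
 * __attribute__((__fallthrough__)) where the compiler supports it, an empty
 * do {} while (0) statement otherwise.
 */
static int apply_audit_mode(int mode, unsigned int *audit, unsigned int *quiet)
{
	switch (mode) {
	case 0:				/* audit everything... */
		*audit = ~0U;
		fallthrough;		/* ...and also stop quieting */
	case 1:
		*quiet = 0;
		break;
	case 2:				/* adjacent labels need no annotation */
	case 3:
		*quiet = ~0U;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}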
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index c16b8c1b03e7dfb97bb2f76e43b5480c80090ab9..4bee32bfe16d1ca88de117e3aedb5cd28f93b58f 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -1240,7 +1240,7 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head, tomoyo_set_space(head); tomoyo_set_string(head, cond->transit->name); } - /* fall through */ + fallthrough; case 1: { const u16 condc = cond->condc; @@ -1345,12 +1345,12 @@ static bool tomoyo_print_condition(struct tomoyo_io_buffer *head, } } head->r.cond_step++; - /* fall through */ + fallthrough; case 2: if (!tomoyo_flush(head)) break; head->r.cond_step++; - /* fall through */ + fallthrough; case 3: if (cond->grant_log != TOMOYO_GRANTLOG_AUTO) tomoyo_io_printf(head, " grant_log=%s", @@ -1639,7 +1639,7 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) tomoyo_set_string(head, tomoyo_dif[i]); head->r.index = 0; head->r.step++; - /* fall through */ + fallthrough; case 1: while (head->r.index < TOMOYO_MAX_ACL_GROUPS) { i = head->r.index++; @@ -1652,14 +1652,14 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) head->r.index = 0; head->r.step++; tomoyo_set_lf(head); - /* fall through */ + fallthrough; case 2: if (!tomoyo_read_domain2(head, &domain->acl_info_list)) return; head->r.step++; if (!tomoyo_set_lf(head)) return; - /* fall through */ + fallthrough; case 3: head->r.step = 0; if (head->r.print_this_domain_only) @@ -2088,7 +2088,7 @@ int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) /* Check max_learning_entry parameter. */ if (tomoyo_domain_quota_is_ok(r)) break; - /* fall through */ + fallthrough; default: return 0; } @@ -2710,13 +2710,13 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, case TOMOYO_DOMAINPOLICY: if (tomoyo_select_domain(head, cp0)) continue; - /* fall through */ + fallthrough; case TOMOYO_EXCEPTIONPOLICY: if (!strcmp(cp0, "select transition_only")) { head->r.print_transition_related_only = true; continue; } - /* fall through */ + fallthrough; default: if (!tomoyo_manager()) { error = -EPERM; diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 86f7d1b90212a9b0f6bb6ce461d8e95b7a164c32..051f7297877cb963887d88118817779b9464d1ef 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -927,7 +927,7 @@ int tomoyo_path2_perm(const u8 operation, const struct path *path1, case TOMOYO_TYPE_LINK: if (!d_is_dir(path1->dentry)) break; - /* fall through */ + fallthrough; case TOMOYO_TYPE_PIVOT_ROOT: tomoyo_add_slash(&buf1); tomoyo_add_slash(&buf2); diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c index 3788906421a73d2b34979fbf4fbfbd33fc8010e8..fe27034f28460e5e09c3f8e9029eec52daf114b9 100644 --- a/sound/core/oss/mulaw.c +++ b/sound/core/oss/mulaw.c @@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug, snd_BUG(); return -EINVAL; } - if (snd_BUG_ON(!snd_pcm_format_linear(format->format))) - return -ENXIO; + if (!snd_pcm_format_linear(format->format)) + return -EINVAL; err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion", src_format, dst_format, diff --git a/sound/core/timer.c b/sound/core/timer.c index d9f85f2d66a3d3abb6e3223c71209d219d9f8355..6e27d87b18ed6b9e696321d0fe6767bb553fb60d 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -816,9 +816,9 @@ static void snd_timer_clear_callbacks(struct snd_timer *timer, * timer tasklet * */ -static void snd_timer_tasklet(unsigned long arg) +static void snd_timer_tasklet(struct 
tasklet_struct *t) { - struct snd_timer *timer = (struct snd_timer *) arg; + struct snd_timer *timer = from_tasklet(timer, t, task_queue); unsigned long flags; if (timer->card && timer->card->shutdown) { @@ -967,8 +967,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid, INIT_LIST_HEAD(&timer->ack_list_head); INIT_LIST_HEAD(&timer->sack_list_head); spin_lock_init(&timer->lock); - tasklet_init(&timer->task_queue, snd_timer_tasklet, - (unsigned long)timer); + tasklet_setup(&timer->task_queue, snd_timer_tasklet); timer->max_instances = 1000; /* default limit per timer */ if (card != NULL) { timer->module = card->module; diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c index f8586f75441db690134f1913cf4413c97c446e6f..ee1c428b1fd36493feb3732cc47ad38a8b8beb06 100644 --- a/sound/firewire/amdtp-stream.c +++ b/sound/firewire/amdtp-stream.c @@ -64,7 +64,7 @@ #define IT_PKT_HEADER_SIZE_CIP 8 // For 2 CIP header. #define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing. -static void pcm_period_tasklet(unsigned long data); +static void pcm_period_tasklet(struct tasklet_struct *t); /** * amdtp_stream_init - initialize an AMDTP stream structure @@ -94,7 +94,7 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, s->flags = flags; s->context = ERR_PTR(-1); mutex_init(&s->mutex); - tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s); + tasklet_setup(&s->period_tasklet, pcm_period_tasklet); s->packet_index = 0; init_waitqueue_head(&s->callback_wait); @@ -441,9 +441,9 @@ static void update_pcm_pointers(struct amdtp_stream *s, } } -static void pcm_period_tasklet(unsigned long data) +static void pcm_period_tasklet(struct tasklet_struct *t) { - struct amdtp_stream *s = (void *)data; + struct amdtp_stream *s = from_tasklet(s, t, period_tasklet); struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); if (pcm) diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c index c84b913a9fe010d391740c14013ed743fcf584df..ab8408966ec33650466571153d5936e64d5685c3 100644 --- a/sound/firewire/digi00x/digi00x.c +++ b/sound/firewire/digi00x/digi00x.c @@ -14,6 +14,7 @@ MODULE_LICENSE("GPL v2"); #define VENDOR_DIGIDESIGN 0x00a07e #define MODEL_CONSOLE 0x000001 #define MODEL_RACK 0x000002 +#define SPEC_VERSION 0x000001 static int name_card(struct snd_dg00x *dg00x) { @@ -175,14 +176,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = { /* Both of 002/003 use the same ID. */ { .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_VERSION | IEEE1394_MATCH_MODEL_ID, .vendor_id = VENDOR_DIGIDESIGN, + .version = SPEC_VERSION, .model_id = MODEL_CONSOLE, }, { .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_VERSION | IEEE1394_MATCH_MODEL_ID, .vendor_id = VENDOR_DIGIDESIGN, + .version = SPEC_VERSION, .model_id = MODEL_RACK, }, {} diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c index 5dac0d9fc58e513c9d5de58c81e6c382a056c411..75f2edd8e78fb2b5f92f65f9bb34988b2a4e24f6 100644 --- a/sound/firewire/tascam/tascam.c +++ b/sound/firewire/tascam/tascam.c @@ -39,9 +39,6 @@ static const struct snd_tscm_spec model_specs[] = { .midi_capture_ports = 2, .midi_playback_ports = 4, }, - // This kernel module doesn't support FE-8 because the most of features - // can be implemented in userspace without any specific support of this - // module. 
}; static int identify_model(struct snd_tscm *tscm) { @@ -211,11 +208,39 @@ static void snd_tscm_remove(struct fw_unit *unit) } static const struct ieee1394_device_id snd_tscm_id_table[] = { + // Tascam, FW-1884. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, + .version = 0x800000, + }, + // Tascam, FE-8 (.version = 0x800001) + // This kernel module doesn't support the FE-8, because most of its features + // can be implemented in userspace without any specific support from this + // module. + // + // .version = 0x800002 is unknown. + // + // Tascam, FW-1082. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, + .version = 0x800003, + }, + // Tascam, FW-1804. { .match_flags = IEEE1394_MATCH_VENDOR_ID | - IEEE1394_MATCH_SPECIFIER_ID, + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, .vendor_id = 0x00022e, .specifier_id = 0x00022e, + .version = 0x800004, }, {} }; diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c index 09ddab5f5caebebe39b4a5a8363fe7fde30a696d..9766f6af87430db6068b812c7308f2767d8ad30b 100644 --- a/sound/hda/hdac_bus.c +++ b/sound/hda/hdac_bus.c @@ -46,6 +46,18 @@ int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev, INIT_LIST_HEAD(&bus->hlink_list); init_waitqueue_head(&bus->rirb_wq); bus->irq = -1; + + /* + * Default value of '8' is as per the HD audio specification (Rev 1.0a). + * The following relation is used to derive the STRIPE control value. + * For sample rate <= 48K: + * { ((num_channels * bits_per_sample) / number of SDOs) >= 8 } + * For sample rate > 48K: + * { ((num_channels * bits_per_sample * rate/48000) / + * number of SDOs) >= 8 } + */ + bus->sdo_limit = 8; + return 0; } EXPORT_SYMBOL_GPL(snd_hdac_bus_init); diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index 011b17cc1efa2f103960ddfd8e164a0fe8473f01..b98449fd92f3b184d0d4312eef1e84f652528318 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c @@ -529,17 +529,6 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset) bus->chip_init = true; - /* - * Default value of '8' is as per the HD audio specification (Rev 1.0a). - * Following relation is used to derive STRIPE control value. 
- * For sample rate <= 48K: - * { ((num_channels * bits_per_sample) / number of SDOs) >= 8 } - * For sample rate > 48K: - * { ((num_channels * bits_per_sample * rate/48000) / - * number of SDOs) >= 8 } - */ - bus->sdo_limit = 8; - return true; } EXPORT_SYMBOL_GPL(snd_hdac_bus_init_chip); diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c index 333220f0f8afc0a8496c932bcba4657ac7922683..3e9e9ac804f629813b55de6d84cc8e092e0c4e4c 100644 --- a/sound/hda/hdac_device.c +++ b/sound/hda/hdac_device.c @@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_device_init); void snd_hdac_device_exit(struct hdac_device *codec) { pm_runtime_put_noidle(&codec->dev); + /* keep balance of runtime PM child_count in parent device */ + pm_runtime_set_suspended(&codec->dev); snd_hdac_bus_remove_device(codec->bus, codec); kfree(codec->vendor_name); kfree(codec->chip_name); diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c index 99aec73491676f1b7ba36b10d513573b5275da5a..1c5114dedda92f8e51857309feba50a8bd8728dd 100644 --- a/sound/hda/intel-dsp-config.c +++ b/sound/hda/intel-dsp-config.c @@ -54,7 +54,7 @@ static const struct config_entry config_table[] = { #endif /* * Apollolake (Broxton-P) - * the legacy HDaudio driver is used except on Up Squared (SOF) and + * the legacy HDAudio driver is used except on Up Squared (SOF) and * Chromebooks (SST) */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE) @@ -89,7 +89,7 @@ static const struct config_entry config_table[] = { }, #endif /* - * Skylake and Kabylake use legacy HDaudio driver except for Google + * Skylake and Kabylake use legacy HDAudio driver except for Google * Chromebooks (SST) */ @@ -135,7 +135,7 @@ static const struct config_entry config_table[] = { #endif /* - * Geminilake uses legacy HDaudio driver except for Google + * Geminilake uses legacy HDAudio driver except for Google * Chromebooks */ /* Geminilake */ @@ -157,7 +157,7 @@ static const struct config_entry config_table[] = { /* * CoffeeLake, CannonLake, CometLake, IceLake, TigerLake use legacy - * HDaudio driver except for Google Chromebooks and when DMICs are + * HDAudio driver except for Google Chromebooks and when DMICs are * present. Two cases are required since Coreboot does not expose NHLT * tables. * @@ -391,7 +391,7 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci) if (pci->class == 0x040300) return SND_INTEL_DSP_DRIVER_LEGACY; if (pci->class != 0x040100 && pci->class != 0x040380) { - dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, selecting HDA legacy driver\n", pci->class); + dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, selecting HDAudio legacy driver\n", pci->class); return SND_INTEL_DSP_DRIVER_LEGACY; } diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c index 5363d88cc4b936325e17bf1a1f32d126670b2915..2e5a5c5279e8d1ebc9be1f9585d0aa8d76770c25 100644 --- a/sound/isa/sscape.c +++ b/sound/isa/sscape.c @@ -308,7 +308,7 @@ static inline int verify_mpu401(const struct snd_mpu401 *mpu) } /* - * This is apparently the standard way to initailise an MPU-401 + * This is apparently the standard way to initialise an MPU-401 */ static inline void initialise_mpu401(const struct snd_mpu401 *mpu) { @@ -339,7 +339,7 @@ static void soundscape_free(struct snd_card *c) } /* - * Tell the SoundScape to begin a DMA tranfer using the given channel. + * Tell the SoundScape to begin a DMA transfer using the given channel. * All locking issues are left to the caller. 
*/ static void sscape_start_dma_unsafe(unsigned io_base, enum GA_REG reg) @@ -803,7 +803,7 @@ static int mpu401_open(struct snd_mpu401 *mpu) } /* - * Initialse an MPU-401 subdevice for MIDI support on the SoundScape. + * Initialise an MPU-401 subdevice for MIDI support on the SoundScape. */ static int create_mpu401(struct snd_card *card, int devnum, unsigned long port, int irq) diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c index 023c35a2a95139eed93c633b41b2c61934880d99..35e76480306e7edc460df4b8c3886d3ed81e11d6 100644 --- a/sound/pci/asihpi/asihpi.c +++ b/sound/pci/asihpi/asihpi.c @@ -921,10 +921,10 @@ static void snd_card_asihpi_timer_function(struct timer_list *t) add_timer(&dpcm->timer); } -static void snd_card_asihpi_int_task(unsigned long data) +static void snd_card_asihpi_int_task(struct tasklet_struct *t) { - struct hpi_adapter *a = (struct hpi_adapter *)data; - struct snd_card_asihpi *asihpi; + struct snd_card_asihpi *asihpi = from_tasklet(asihpi, t, t); + struct hpi_adapter *a = asihpi->hpi; WARN_ON(!a || !a->snd_card || !a->snd_card->private_data); asihpi = (struct snd_card_asihpi *)a->snd_card->private_data; @@ -2871,8 +2871,7 @@ static int snd_asihpi_probe(struct pci_dev *pci_dev, if (hpi->interrupt_mode) { asihpi->pcm_start = snd_card_asihpi_pcm_int_start; asihpi->pcm_stop = snd_card_asihpi_pcm_int_stop; - tasklet_init(&asihpi->t, snd_card_asihpi_int_task, - (unsigned long)hpi); + tasklet_setup(&asihpi->t, snd_card_asihpi_int_task); hpi->interrupt_callback = snd_card_asihpi_isr; } else { asihpi->pcm_start = snd_card_asihpi_pcm_timer_start; diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c index 70d775ff967ebed7451b9a65c5e173aa56a58387..c189f70c82cb9a1a9f98aef77ba4e861184e9195 100644 --- a/sound/pci/ca0106/ca0106_main.c +++ b/sound/pci/ca0106/ca0106_main.c @@ -537,7 +537,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id, else /* Power down */ chip->spi_dac_reg[reg] |= bit; - return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]); + if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0) + return -ENXIO; } return 0; } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index e34a4d5d047c18e5eeea9968e0f9915ed7476dc1..36a9dbc33aa01f798aeebefabf29564bdcacec9a 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2127,9 +2127,10 @@ static int azx_probe(struct pci_dev *pci, */ if (dmic_detect) { err = snd_intel_dsp_driver_probe(pci); - if (err != SND_INTEL_DSP_DRIVER_ANY && - err != SND_INTEL_DSP_DRIVER_LEGACY) + if (err != SND_INTEL_DSP_DRIVER_ANY && err != SND_INTEL_DSP_DRIVER_LEGACY) { + dev_dbg(&pci->dev, "HDAudio driver not selected, aborting probe\n"); return -ENODEV; + } } else { dev_warn(&pci->dev, "dmic_detect option is deprecated, pass snd-intel-dspcfg.dsp_driver=1 option instead\n"); } @@ -2745,8 +2746,6 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI }, /* Zhaoxin */ { PCI_DEVICE(0x1d17, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN }, - /* Loongson */ - { PCI_DEVICE(0x0014, 0x7a07), .driver_data = AZX_DRIVER_GENERIC }, { 0, } }; MODULE_DEVICE_TABLE(pci, azx_ids); diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c index c94553bcca888fd445acbe0916299925adaf97d2..70164d1428d404661120da07b4f1baf3b0185df5 100644 --- a/sound/pci/hda/hda_tegra.c +++ b/sound/pci/hda/hda_tegra.c @@ -179,6 +179,10 @@ static int __maybe_unused hda_tegra_runtime_suspend(struct device *dev) struct 
hda_tegra *hda = container_of(chip, struct hda_tegra, chip); if (chip && chip->running) { + /* enable controller wake up event */ + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | + STATESTS_INT_MASK); + azx_stop_chip(chip); azx_enter_link_reset(chip); } @@ -200,6 +204,9 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev) if (chip && chip->running) { hda_tegra_init(hda); azx_init_chip(chip, 1); + /* disable controller wake up event */ + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & + ~STATESTS_INT_MASK); } return 0; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index b8c8490e568b7eab9527af923b3d89ddd3b38cfe..40205008809052a4a28fabb23d4ecac12c56a831 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -2794,6 +2794,7 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec, hda_nid_t cvt_nid) { if (per_pin) { + haswell_verify_D0(codec, per_pin->cvt_nid, per_pin->pin_nid); snd_hda_set_dev_select(codec, per_pin->pin_nid, per_pin->dev_id); intel_verify_pin_cvt_connect(codec, per_pin); @@ -3734,6 +3735,7 @@ static int tegra_hdmi_build_pcms(struct hda_codec *codec) static int patch_tegra_hdmi(struct hda_codec *codec) { + struct hdmi_spec *spec; int err; err = patch_generic_hdmi(codec); @@ -3741,6 +3743,10 @@ static int patch_tegra_hdmi(struct hda_codec *codec) return err; codec->patch_ops.build_pcms = tegra_hdmi_build_pcms; + spec = codec->spec; + spec->chmap.ops.chmap_cea_alloc_validate_get_type = + nvhdmi_chmap_cea_alloc_validate_get_type; + spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; return 0; } @@ -4263,6 +4269,7 @@ HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi), +HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi), HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 7f9d3527373495180e31c02fa7d56662bc6e3318..c521a1f170969aabea99b5c627486d0985c97de8 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2475,6 +2475,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), + SND_PCI_QUIRK(0x1462, 0x9c37, "MSI X570-A PRO", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), @@ -5867,6 +5868,39 @@ static void alc275_fixup_gpio4_off(struct hda_codec *codec, } } +/* Quirk for Thinkpad X1 7th and 8th Gen + * The following fixed routing is needed: + * DAC1 (NID 0x02) -> Speaker (NID 0x14); some eq applied secretly + * DAC2 (NID 0x03) -> Bass (NID 0x17) & Headphone (NID 0x21); sharing a DAC + * DAC3 (NID 0x06) -> Unused, due to the lack of volume amp + */ +static void alc285_fixup_thinkpad_x1_gen7(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */ + 
static const hda_nid_t preferred_pairs[] = { + 0x14, 0x02, 0x17, 0x03, 0x21, 0x03, 0 + }; + struct alc_spec *spec = codec->spec; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); + spec->gen.preferred_dacs = preferred_pairs; + break; + case HDA_FIXUP_ACT_BUILD: + /* The generic parser creates somewhat unintuitive volume ctls + * with the fixed routing above, and the shared DAC2 may be + * confusing for PA. + * Rename those to unique names so that PA doesn't touch them + * and use only Master volume. + */ + rename_ctl(codec, "Front Playback Volume", "DAC1 Playback Volume"); + rename_ctl(codec, "Bass Speaker Playback Volume", "DAC2 Playback Volume"); + break; + } +} + static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -6135,6 +6169,7 @@ enum { ALC289_FIXUP_DUAL_SPK, ALC294_FIXUP_SPK2_TO_DAC1, ALC294_FIXUP_ASUS_DUAL_SPK, + ALC285_FIXUP_THINKPAD_X1_GEN7, ALC285_FIXUP_THINKPAD_HEADSET_JACK, ALC294_FIXUP_ASUS_HPE, ALC294_FIXUP_ASUS_COEF_1B, @@ -7280,11 +7315,17 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC294_FIXUP_SPK2_TO_DAC1 }, + [ALC285_FIXUP_THINKPAD_X1_GEN7] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_thinkpad_x1_gen7, + .chained = true, + .chain_id = ALC269_FIXUP_THINKPAD_ACPI + }, [ALC285_FIXUP_THINKPAD_HEADSET_JACK] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_jack, .chained = true, - .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1 + .chain_id = ALC285_FIXUP_THINKPAD_X1_GEN7 }, [ALC294_FIXUP_ASUS_HPE] = { .type = HDA_FIXUP_VERBS, @@ -7694,6 +7735,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), @@ -7955,6 +7999,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"}, {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"}, {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"}, + {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"}, {} }; #define ALC225_STANDARD_PINS \ diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c index b4f30028182272f7f2f234e0eb1516619d280a3c..098c69b3b7aaeebc81628da74e0910c1749c5e4e 100644 --- a/sound/pci/riptide/riptide.c +++ b/sound/pci/riptide/riptide.c @@ -1070,9 +1070,9 @@ getmixer(struct cmdif *cif, short num, unsigned short *rval, return 0; } -static void riptide_handleirq(unsigned long dev_id) +static void riptide_handleirq(struct tasklet_struct 
*t) { - struct snd_riptide *chip = (void *)dev_id; + struct snd_riptide *chip = from_tasklet(chip, t, riptide_tq); struct cmdif *cif = chip->cif; struct snd_pcm_substream *substream[PLAYBACK_SUBSTREAMS + 1]; struct snd_pcm_runtime *runtime; @@ -1843,7 +1843,7 @@ snd_riptide_create(struct snd_card *card, struct pci_dev *pci, chip->received_irqs = 0; chip->handled_irqs = 0; chip->cif = NULL; - tasklet_init(&chip->riptide_tq, riptide_handleirq, (unsigned long)chip); + tasklet_setup(&chip->riptide_tq, riptide_handleirq); if ((chip->res_port = request_region(chip->port, 64, "RIPTIDE")) == NULL) { diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c index 227aece17e3908468b285713230dc34b4b0da1dd..dda56ecfd33b950b5a945d6752df9fce0a65d2ac 100644 --- a/sound/pci/rme9652/hdsp.c +++ b/sound/pci/rme9652/hdsp.c @@ -3791,9 +3791,9 @@ static int snd_hdsp_set_defaults(struct hdsp *hdsp) return 0; } -static void hdsp_midi_tasklet(unsigned long arg) +static void hdsp_midi_tasklet(struct tasklet_struct *t) { - struct hdsp *hdsp = (struct hdsp *)arg; + struct hdsp *hdsp = from_tasklet(hdsp, t, midi_tasklet); if (hdsp->midi[0].pending) snd_hdsp_midi_input_read (&hdsp->midi[0]); @@ -5182,7 +5182,7 @@ static int snd_hdsp_create(struct snd_card *card, spin_lock_init(&hdsp->lock); - tasklet_init(&hdsp->midi_tasklet, hdsp_midi_tasklet, (unsigned long)hdsp); + tasklet_setup(&hdsp->midi_tasklet, hdsp_midi_tasklet); pci_read_config_word(hdsp->pci, PCI_CLASS_REVISION, &hdsp->firmware_rev); hdsp->firmware_rev &= 0xff; diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index 0fa49f4d15cff7a2a2f27f7f3db97e25eb1c1a36..572350aaf18d05133159d27546764957df3103b8 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c @@ -2169,9 +2169,9 @@ static int snd_hdspm_create_midi(struct snd_card *card, } -static void hdspm_midi_tasklet(unsigned long arg) +static void hdspm_midi_tasklet(struct tasklet_struct *t) { - struct hdspm *hdspm = (struct hdspm *)arg; + struct hdspm *hdspm = from_tasklet(hdspm, t, midi_tasklet); int i = 0; while (i < hdspm->midiPorts) { @@ -6836,8 +6836,7 @@ static int snd_hdspm_create(struct snd_card *card, } - tasklet_init(&hdspm->midi_tasklet, - hdspm_midi_tasklet, (unsigned long) hdspm); + tasklet_setup(&hdspm->midi_tasklet, hdspm_midi_tasklet); if (hdspm->io_type != MADIface) { diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c index b8161a08f2ca98a1b0aae5954dfbf15181561e6e..58bb49fff18471a25126ef1732ecaf7662a7ed24 100644 --- a/sound/ppc/snd_ps3.c +++ b/sound/ppc/snd_ps3.c @@ -227,14 +227,14 @@ static int snd_ps3_program_dma(struct snd_ps3_card_info *card, switch (filltype) { case SND_PS3_DMA_FILLTYPE_SILENT_FIRSTFILL: silent = 1; - /* intentionally fall thru */ + fallthrough; case SND_PS3_DMA_FILLTYPE_FIRSTFILL: ch0_kick_event = PS3_AUDIO_KICK_EVENT_ALWAYS; break; case SND_PS3_DMA_FILLTYPE_SILENT_RUNNING: silent = 1; - /* intentionally fall thru */ + fallthrough; case SND_PS3_DMA_FILLTYPE_RUNNING: ch0_kick_event = PS3_AUDIO_KICK_EVENT_SERIALOUT0_EMPTY; break; diff --git a/sound/soc/amd/acp3x-rt5682-max9836.c b/sound/soc/amd/acp3x-rt5682-max9836.c index 55815fdaa1aafeffb3813869529e8f0a128bcf21..406526e79af34a33b101feb6069e9b71f65d1b27 100644 --- a/sound/soc/amd/acp3x-rt5682-max9836.c +++ b/sound/soc/amd/acp3x-rt5682-max9836.c @@ -138,7 +138,7 @@ static int acp3x_1015_hw_params(struct snd_pcm_substream *substream, srate = params_rate(params); for_each_rtd_codec_dais(rtd, i, codec_dai) { - if (strcmp(codec_dai->component->name, "rt1015-aif")) + if 
(strcmp(codec_dai->name, "rt1015-aif")) continue; ret = snd_soc_dai_set_bclk_ratio(codec_dai, 64); if (ret < 0) diff --git a/sound/soc/amd/renoir/acp3x-pdm-dma.c b/sound/soc/amd/renoir/acp3x-pdm-dma.c index 623dfd3ea70517377dfde0c08c345141c100d3c7..7b14d9a81b97aa85fd391d0f8c2f848d6433b762 100644 --- a/sound/soc/amd/renoir/acp3x-pdm-dma.c +++ b/sound/soc/amd/renoir/acp3x-pdm-dma.c @@ -314,40 +314,30 @@ static int acp_pdm_dma_close(struct snd_soc_component *component, return 0; } -static int acp_pdm_dai_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params, - struct snd_soc_dai *dai) +static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream, + int cmd, struct snd_soc_dai *dai) { struct pdm_stream_instance *rtd; + int ret; + bool pdm_status; unsigned int ch_mask; rtd = substream->runtime->private_data; - switch (params_channels(params)) { + ret = 0; + switch (substream->runtime->channels) { case TWO_CH: ch_mask = 0x00; break; default: return -EINVAL; } - rn_writel(ch_mask, rtd->acp_base + ACP_WOV_PDM_NO_OF_CHANNELS); - rn_writel(PDM_DECIMATION_FACTOR, rtd->acp_base + - ACP_WOV_PDM_DECIMATION_FACTOR); - return 0; -} - -static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream, - int cmd, struct snd_soc_dai *dai) -{ - struct pdm_stream_instance *rtd; - int ret; - bool pdm_status; - - rtd = substream->runtime->private_data; - ret = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + rn_writel(ch_mask, rtd->acp_base + ACP_WOV_PDM_NO_OF_CHANNELS); + rn_writel(PDM_DECIMATION_FACTOR, rtd->acp_base + + ACP_WOV_PDM_DECIMATION_FACTOR); rtd->bytescount = acp_pdm_get_byte_count(rtd, substream->stream); pdm_status = check_pdm_dma_status(rtd->acp_base); @@ -369,7 +359,6 @@ static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream, } static struct snd_soc_dai_ops acp_pdm_dai_ops = { - .hw_params = acp_pdm_dai_hw_params, .trigger = acp_pdm_dai_trigger, }; diff --git a/sound/soc/atmel/mchp-i2s-mcc.c b/sound/soc/atmel/mchp-i2s-mcc.c index 3cb63886195ff5a684ec1a4f4f5a7fc46a9ac897..04acc18f2d72ceeb5f644112d6d06836009eedcc 100644 --- a/sound/soc/atmel/mchp-i2s-mcc.c +++ b/sound/soc/atmel/mchp-i2s-mcc.c @@ -536,7 +536,7 @@ static int mchp_i2s_mcc_hw_params(struct snd_pcm_substream *substream, /* cpu is BCLK master */ mrb |= MCHP_I2SMCC_MRB_CLKSEL_INT; set_divs = 1; - /* fall through */ + fallthrough; case SND_SOC_DAIFMT_CBM_CFM: /* cpu is slave */ mra |= MCHP_I2SMCC_MRA_MODE_SLAVE; diff --git a/sound/soc/codecs/jz4770.c b/sound/soc/codecs/jz4770.c index c0a28f06b09a7d845d67f2f4a6018952df375113..298689a07168d02e564ea4f84994390567933d9f 100644 --- a/sound/soc/codecs/jz4770.c +++ b/sound/soc/codecs/jz4770.c @@ -202,7 +202,7 @@ static int jz4770_codec_set_bias_level(struct snd_soc_component *codec, REG_CR_VIC_SB_SLEEP, REG_CR_VIC_SB_SLEEP); regmap_update_bits(regmap, JZ4770_CODEC_REG_CR_VIC, REG_CR_VIC_SB, REG_CR_VIC_SB); - /* fall-through */ + fallthrough; default: break; } diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index 4428c62e25cf03e3ea0fe070c6d00ceb30928f9e..3ddd822240e3aa05fc8381f1aebf11be933fb723 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -19,8 +19,8 @@ #define CDC_D_REVISION1 (0xf000) #define CDC_D_PERPH_SUBTYPE (0xf005) -#define CDC_D_INT_EN_SET (0x015) -#define CDC_D_INT_EN_CLR (0x016) +#define CDC_D_INT_EN_SET (0xf015) +#define CDC_D_INT_EN_CLR (0xf016) #define MBHC_SWITCH_INT BIT(7) 
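The msm8916-wcd-analog hunk just above is a register-address fix rather than a cleanup: every register in this block lives in the codec's digital slave section at base 0xf000 (compare CDC_D_REVISION1 and CDC_D_PERPH_SUBTYPE a few lines up), but the interrupt enable set/clear defines were written without that base, so they addressed the wrong block. A sketch of the base-plus-offset convention the fix restores; CDC_D_BASE is an illustrative name, not one the driver defines:

/* Digital codec block base within the PMIC register space. */
#define CDC_D_BASE		0xf000

/* Full addresses are base + in-block offset, like the other CDC_D_* registers. */
#define CDC_D_INT_EN_SET	(CDC_D_BASE + 0x15)	/* previously a bare 0x015 */
#define CDC_D_INT_EN_CLR	(CDC_D_BASE + 0x16)	/* previously a bare 0x016 */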
#define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6) #define MBHC_BUTTON_PRESS_DET BIT(5) diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c index f0da55901dcbe3e0c35a30f4452d758e9b795824..b8845f45549ea2d2eb9264b4cee5b1e8f4ddad73 100644 --- a/sound/soc/codecs/pcm186x.c +++ b/sound/soc/codecs/pcm186x.c @@ -401,7 +401,7 @@ static int pcm186x_set_fmt(struct snd_soc_dai *dai, unsigned int format) break; case SND_SOC_DAIFMT_DSP_A: priv->tdm_offset += 1; - /* fall through */ + fallthrough; /* DSP_A uses the same basic config as DSP_B * except we need to shift the TDM output by one BCK cycle */ diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c index 68a3b48e6b31f16c15bc1bf11739127583c8149d..3bce9a14f0f31e04915bdb17eb5aba59814b6675 100644 --- a/sound/soc/codecs/wm8958-dsp2.c +++ b/sound/soc/codecs/wm8958-dsp2.c @@ -412,8 +412,12 @@ int wm8958_aif_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct wm8994 *control = dev_get_drvdata(component->dev->parent); int i; + if (control->type != WM8958) + return 0; + switch (event) { case SND_SOC_DAPM_POST_PMU: case SND_SOC_DAPM_PRE_PMU: diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 317916cb4e27bd184874531373b8077cec4eb9b0..0623a2251084ec7b5d5541c4a3d6016b323d0a9b 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -151,7 +151,6 @@ static const struct reg_default wm8962_reg[] = { { 40, 0x0000 }, /* R40 - SPKOUTL volume */ { 41, 0x0000 }, /* R41 - SPKOUTR volume */ - { 48, 0x0000 }, /* R48 - Additional control(4) */ { 49, 0x0010 }, /* R49 - Class D Control 1 */ { 51, 0x0003 }, /* R51 - Class D Control 2 */ @@ -842,6 +841,7 @@ static bool wm8962_readable_register(struct device *dev, unsigned int reg) case WM8962_SPKOUTL_VOLUME: case WM8962_SPKOUTR_VOLUME: case WM8962_THERMAL_SHUTDOWN_STATUS: + case WM8962_ADDITIONAL_CONTROL_4: case WM8962_CLASS_D_CONTROL_1: case WM8962_CLASS_D_CONTROL_2: case WM8962_CLOCKING_4: diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index a84ae879d37e673b3da0bd26362d46ba2368059e..038be667c1a61813aa82112d9d2b7b0ac66f409f 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -43,10 +43,12 @@ #define WM8994_NUM_DRC 3 #define WM8994_NUM_EQ 3 -static struct { +struct wm8994_reg_mask { unsigned int reg; unsigned int mask; -} wm8994_vu_bits[] = { +}; + +static struct wm8994_reg_mask wm8994_vu_bits[] = { { WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU }, { WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU }, { WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU }, @@ -60,14 +62,10 @@ static struct { { WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU }, { WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU }, - { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU }, - { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU }, { WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU }, { WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU }, { WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU }, { WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU }, - { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU }, - { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, { WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU }, { WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, { WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU }, @@ -76,6 +74,14 @@ static struct { { WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU }, }; +/* VU bitfields for ADC2, DAC2 not 
available on WM1811 */ +static struct wm8994_reg_mask wm8994_adc2_dac2_vu_bits[] = { + { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU }, + { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU }, + { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU }, + { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, +}; + static int wm8994_drc_base[] = { WM8994_AIF1_DRC1_1, WM8994_AIF1_DRC2_1, @@ -1030,6 +1036,26 @@ static bool wm8994_check_class_w_digital(struct snd_soc_component *component) return true; } +static void wm8994_update_vu_bits(struct snd_soc_component *component) +{ + struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component); + struct wm8994 *control = wm8994->wm8994; + int i; + + for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) + snd_soc_component_write(component, wm8994_vu_bits[i].reg, + snd_soc_component_read(component, + wm8994_vu_bits[i].reg)); + if (control->type == WM1811) + return; + + for (i = 0; i < ARRAY_SIZE(wm8994_adc2_dac2_vu_bits); i++) + snd_soc_component_write(component, + wm8994_adc2_dac2_vu_bits[i].reg, + snd_soc_component_read(component, + wm8994_adc2_dac2_vu_bits[i].reg)); +} + static int aif_mclk_set(struct snd_soc_component *component, int aif, bool enable) { struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component); @@ -1076,7 +1102,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w, struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component); struct wm8994 *control = wm8994->wm8994; int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; - int ret, i; + int ret; int dac; int adc; int val; @@ -1144,10 +1170,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w, break; case SND_SOC_DAPM_POST_PMU: - for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) - snd_soc_component_write(component, wm8994_vu_bits[i].reg, - snd_soc_component_read(component, - wm8994_vu_bits[i].reg)); + wm8994_update_vu_bits(component); break; case SND_SOC_DAPM_PRE_PMD: @@ -1181,7 +1204,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); - int ret, i; + int ret; int dac; int adc; int val; @@ -1237,10 +1260,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w, break; case SND_SOC_DAPM_POST_PMU: - for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) - snd_soc_component_write(component, wm8994_vu_bits[i].reg, - snd_soc_component_read(component, - wm8994_vu_bits[i].reg)); + wm8994_update_vu_bits(component); break; case SND_SOC_DAPM_PRE_PMD: @@ -4346,6 +4366,14 @@ static int wm8994_component_probe(struct snd_soc_component *component) wm8994_vu_bits[i].mask, wm8994_vu_bits[i].mask); + if (control->type != WM1811) { + for (i = 0; i < ARRAY_SIZE(wm8994_adc2_dac2_vu_bits); i++) + snd_soc_component_update_bits(component, + wm8994_adc2_dac2_vu_bits[i].reg, + wm8994_adc2_dac2_vu_bits[i].mask, + wm8994_adc2_dac2_vu_bits[i].mask); + } + /* Set the low bit of the 3D stereo depth so TLV matches */ snd_soc_component_update_bits(component, WM8994_AIF1_DAC1_FILTERS_2, 1 << WM8994_AIF1DAC1_3D_GAIN_SHIFT, diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c index de136c0a497dd8ba780213019860c31e019e6d6b..52adedc03245b6a4895bc2adf5329d3c79e1c5b8 100644 --- a/sound/soc/fsl/fsl-asoc-card.c +++ b/sound/soc/fsl/fsl-asoc-card.c @@ -73,6 +73,7 @@ struct cpu_priv { * @codec_priv: CODEC private data * @cpu_priv: CPU private data * @card: ASoC card structure + * @streams: Mask of current active streams * @sample_rate: Current sample rate 
* @sample_format: Current sample format * @asrc_rate: ASRC sample rate used by Back-Ends @@ -89,6 +90,7 @@ struct fsl_asoc_card_priv { struct codec_priv codec_priv; struct cpu_priv cpu_priv; struct snd_soc_card card; + u8 streams; u32 sample_rate; snd_pcm_format_t sample_format; u32 asrc_rate; @@ -151,21 +153,17 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream, struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card); bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; + struct codec_priv *codec_priv = &priv->codec_priv; struct cpu_priv *cpu_priv = &priv->cpu_priv; struct device *dev = rtd->card->dev; + unsigned int pll_out; int ret; priv->sample_rate = params_rate(params); priv->sample_format = params_format(params); + priv->streams |= BIT(substream->stream); - /* - * If codec-dai is DAI Master and all configurations are already in the - * set_bias_level(), bypass the remaining settings in hw_params(). - * Note: (dai_fmt & CBM_CFM) includes CBM_CFM and CBM_CFS. - */ - if ((priv->card.set_bias_level && - priv->dai_fmt & SND_SOC_DAIFMT_CBM_CFM) || - fsl_asoc_card_is_ac97(priv)) + if (fsl_asoc_card_is_ac97(priv)) return 0; /* Specific configurations of DAIs starts from here */ @@ -174,7 +172,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream, cpu_priv->sysclk_dir[tx]); if (ret && ret != -ENOTSUPP) { dev_err(dev, "failed to set sysclk for cpu dai\n"); - return ret; + goto fail; } if (cpu_priv->slot_width) { @@ -182,6 +180,68 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream, cpu_priv->slot_width); if (ret && ret != -ENOTSUPP) { dev_err(dev, "failed to set TDM slot for cpu dai\n"); + goto fail; + } + } + + /* Specific configuration for PLL */ + if (codec_priv->pll_id && codec_priv->fll_id) { + if (priv->sample_format == SNDRV_PCM_FORMAT_S24_LE) + pll_out = priv->sample_rate * 384; + else + pll_out = priv->sample_rate * 256; + + ret = snd_soc_dai_set_pll(asoc_rtd_to_codec(rtd, 0), + codec_priv->pll_id, + codec_priv->mclk_id, + codec_priv->mclk_freq, pll_out); + if (ret) { + dev_err(dev, "failed to start FLL: %d\n", ret); + goto fail; + } + + ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(rtd, 0), + codec_priv->fll_id, + pll_out, SND_SOC_CLOCK_IN); + + if (ret && ret != -ENOTSUPP) { + dev_err(dev, "failed to set SYSCLK: %d\n", ret); + goto fail; + } + } + + return 0; + +fail: + priv->streams &= ~BIT(substream->stream); + return ret; +} + +static int fsl_asoc_card_hw_free(struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card); + struct codec_priv *codec_priv = &priv->codec_priv; + struct device *dev = rtd->card->dev; + int ret; + + priv->streams &= ~BIT(substream->stream); + + if (!priv->streams && codec_priv->pll_id && codec_priv->fll_id) { + /* Force freq to be 0 to avoid error message in codec */ + ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(rtd, 0), + codec_priv->mclk_id, + 0, + SND_SOC_CLOCK_IN); + if (ret) { + dev_err(dev, "failed to switch away from FLL: %d\n", ret); + return ret; + } + + ret = snd_soc_dai_set_pll(asoc_rtd_to_codec(rtd, 0), + codec_priv->pll_id, 0, 0, 0); + if (ret && ret != -ENOTSUPP) { + dev_err(dev, "failed to stop FLL: %d\n", ret); return ret; } } @@ -191,6 +251,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream, static const struct snd_soc_ops fsl_asoc_card_ops = { 
.hw_params = fsl_asoc_card_hw_params, + .hw_free = fsl_asoc_card_hw_free, }; static int be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, @@ -254,75 +315,6 @@ static struct snd_soc_dai_link fsl_asoc_card_dai[] = { }, }; -static int fsl_asoc_card_set_bias_level(struct snd_soc_card *card, - struct snd_soc_dapm_context *dapm, - enum snd_soc_bias_level level) -{ - struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(card); - struct snd_soc_pcm_runtime *rtd; - struct snd_soc_dai *codec_dai; - struct codec_priv *codec_priv = &priv->codec_priv; - struct device *dev = card->dev; - unsigned int pll_out; - int ret; - - rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]); - codec_dai = asoc_rtd_to_codec(rtd, 0); - if (dapm->dev != codec_dai->dev) - return 0; - - switch (level) { - case SND_SOC_BIAS_PREPARE: - if (dapm->bias_level != SND_SOC_BIAS_STANDBY) - break; - - if (priv->sample_format == SNDRV_PCM_FORMAT_S24_LE) - pll_out = priv->sample_rate * 384; - else - pll_out = priv->sample_rate * 256; - - ret = snd_soc_dai_set_pll(codec_dai, codec_priv->pll_id, - codec_priv->mclk_id, - codec_priv->mclk_freq, pll_out); - if (ret) { - dev_err(dev, "failed to start FLL: %d\n", ret); - return ret; - } - - ret = snd_soc_dai_set_sysclk(codec_dai, codec_priv->fll_id, - pll_out, SND_SOC_CLOCK_IN); - if (ret && ret != -ENOTSUPP) { - dev_err(dev, "failed to set SYSCLK: %d\n", ret); - return ret; - } - break; - - case SND_SOC_BIAS_STANDBY: - if (dapm->bias_level != SND_SOC_BIAS_PREPARE) - break; - - ret = snd_soc_dai_set_sysclk(codec_dai, codec_priv->mclk_id, - codec_priv->mclk_freq, - SND_SOC_CLOCK_IN); - if (ret && ret != -ENOTSUPP) { - dev_err(dev, "failed to switch away from FLL: %d\n", ret); - return ret; - } - - ret = snd_soc_dai_set_pll(codec_dai, codec_priv->pll_id, 0, 0, 0); - if (ret) { - dev_err(dev, "failed to stop FLL: %d\n", ret); - return ret; - } - break; - - default: - break; - } - - return 0; -} - static int fsl_asoc_card_audmux_init(struct device_node *np, struct fsl_asoc_card_priv *priv) { @@ -611,7 +603,6 @@ static int fsl_asoc_card_probe(struct platform_device *pdev) /* Diversify the card configurations */ if (of_device_is_compatible(np, "fsl,imx-audio-cs42888")) { codec_dai_name = "cs42888"; - priv->card.set_bias_level = NULL; priv->cpu_priv.sysclk_freq[TX] = priv->codec_priv.mclk_freq; priv->cpu_priv.sysclk_freq[RX] = priv->codec_priv.mclk_freq; priv->cpu_priv.sysclk_dir[TX] = SND_SOC_CLOCK_OUT; @@ -628,26 +619,22 @@ static int fsl_asoc_card_probe(struct platform_device *pdev) priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; } else if (of_device_is_compatible(np, "fsl,imx-audio-wm8962")) { codec_dai_name = "wm8962"; - priv->card.set_bias_level = fsl_asoc_card_set_bias_level; priv->codec_priv.mclk_id = WM8962_SYSCLK_MCLK; priv->codec_priv.fll_id = WM8962_SYSCLK_FLL; priv->codec_priv.pll_id = WM8962_FLL; priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; } else if (of_device_is_compatible(np, "fsl,imx-audio-wm8960")) { codec_dai_name = "wm8960-hifi"; - priv->card.set_bias_level = fsl_asoc_card_set_bias_level; priv->codec_priv.fll_id = WM8960_SYSCLK_AUTO; priv->codec_priv.pll_id = WM8960_SYSCLK_AUTO; priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; } else if (of_device_is_compatible(np, "fsl,imx-audio-ac97")) { codec_dai_name = "ac97-hifi"; - priv->card.set_bias_level = NULL; priv->dai_fmt = SND_SOC_DAIFMT_AC97; priv->card.dapm_routes = audio_map_ac97; priv->card.num_dapm_routes = ARRAY_SIZE(audio_map_ac97); } else if (of_device_is_compatible(np, "fsl,imx-audio-mqs")) { codec_dai_name = "fsl-mqs-dai"; - 
priv->card.set_bias_level = NULL; priv->dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS | SND_SOC_DAIFMT_NB_NF; @@ -657,7 +644,6 @@ static int fsl_asoc_card_probe(struct platform_device *pdev) priv->card.num_dapm_routes = ARRAY_SIZE(audio_map_tx); } else if (of_device_is_compatible(np, "fsl,imx-audio-wm8524")) { codec_dai_name = "wm8524-hifi"; - priv->card.set_bias_level = NULL; priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS; priv->dai_link[1].dpcm_capture = 0; priv->dai_link[2].dpcm_capture = 0; diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c index 4ae36099ae82f0d146c0cf927113703e4025c5bd..79b861afd98607d8686568ab1759b2fc6e258506 100644 --- a/sound/soc/fsl/fsl_esai.c +++ b/sound/soc/fsl/fsl_esai.c @@ -708,9 +708,9 @@ static void fsl_esai_trigger_stop(struct fsl_esai *esai_priv, bool tx) ESAI_xFCR_xFR, 0); } -static void fsl_esai_hw_reset(unsigned long arg) +static void fsl_esai_hw_reset(struct tasklet_struct *t) { - struct fsl_esai *esai_priv = (struct fsl_esai *)arg; + struct fsl_esai *esai_priv = from_tasklet(esai_priv, t, task); bool tx = true, rx = false, enabled[2]; unsigned long lock_flags; u32 tfcr, rfcr; @@ -1070,8 +1070,7 @@ static int fsl_esai_probe(struct platform_device *pdev) return ret; } - tasklet_init(&esai_priv->task, fsl_esai_hw_reset, - (unsigned long)esai_priv); + tasklet_setup(&esai_priv->task, fsl_esai_hw_reset); pm_runtime_enable(&pdev->dev); diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index d8b9c6547142055a9815384183a58e120048dd01..404be27c15fed87b1083e2258a91fdb9dfd885c7 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -898,7 +898,7 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt) "missing baudclk for master mode\n"); return -EINVAL; } - /* fall through */ + fallthrough; case SND_SOC_DAIFMT_CBM_CFS: ssi->i2s_net |= SSI_SCR_I2S_MODE_MASTER; break; diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c index 9e4f66b6b92b00f6da379c637f2d51bfde6aba1f..23198488217628e8f988d25adaef8e11cdb809de 100644 --- a/sound/soc/fsl/mpc5200_dma.c +++ b/sound/soc/fsl/mpc5200_dma.c @@ -339,7 +339,6 @@ static int psc_dma_new(struct snd_soc_component *component, static void psc_dma_free(struct snd_soc_component *component, struct snd_pcm *pcm) { - struct snd_soc_pcm_runtime *rtd = pcm->private_data; struct snd_pcm_substream *substream; int stream; diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c index fd5dcd6b9f856521223b82388dd9a4dc94ae6dd9..907f5f1f7b445eb2c7f9363c00e90c5a4cf3a020 100644 --- a/sound/soc/hisilicon/hi6210-i2s.c +++ b/sound/soc/hisilicon/hi6210-i2s.c @@ -261,13 +261,13 @@ static int hi6210_i2s_hw_params(struct snd_pcm_substream *substream, switch (params_format(params)) { case SNDRV_PCM_FORMAT_U16_LE: signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT; - /* fall through */ + fallthrough; case SNDRV_PCM_FORMAT_S16_LE: bits = HII2S_BITS_16; break; case SNDRV_PCM_FORMAT_U24_LE: signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT; - /* fall through */ + fallthrough; case SNDRV_PCM_FORMAT_S24_LE: bits = HII2S_BITS_24; break; diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 49b9f18472bcefe5c848b97c9b8a55c17d1e6147..b1cac7abdc0ab6df91cb82048df1ead8db9aea2f 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -331,7 +331,7 @@ static int sst_media_open(struct snd_pcm_substream *substream, ret_val = power_up_sst(stream); if (ret_val < 
0) - return ret_val; + goto out_power_up; /* Make sure, that the period size is always even */ snd_pcm_hw_constraint_step(substream->runtime, 0, @@ -340,8 +340,9 @@ static int sst_media_open(struct snd_pcm_substream *substream, return snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); out_ops: - kfree(stream); mutex_unlock(&sst_lock); +out_power_up: + kfree(stream); return ret_val; } diff --git a/sound/soc/intel/baytrail/sst-baytrail-pcm.c b/sound/soc/intel/baytrail/sst-baytrail-pcm.c index 54a66cc6db890234f0fa870300293abce64d7042..d2cda33b65d58da1850c34ec6a5ef75b27a06ec0 100644 --- a/sound/soc/intel/baytrail/sst-baytrail-pcm.c +++ b/sound/soc/intel/baytrail/sst-baytrail-pcm.c @@ -181,7 +181,7 @@ static int sst_byt_pcm_trigger(struct snd_soc_component *component, break; case SNDRV_PCM_TRIGGER_SUSPEND: pdata->restore_stream = false; - /* fallthrough */ + fallthrough; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: sst_byt_stream_pause(byt, pcm_data->stream); break; diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c index 414ae4bb5224a96d58d2586c2d2d092395ebe988..7ae34b49815c9b6fc873a086e52d8df95ba5176e 100644 --- a/sound/soc/intel/boards/bytcht_es8316.c +++ b/sound/soc/intel/boards/bytcht_es8316.c @@ -573,7 +573,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev) break; default: dev_err(dev, "get speaker GPIO failed: %d\n", ret); - /* fall through */ + fallthrough; case -EPROBE_DEFER: return ret; } diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c index 4e2897596cea317814e0d7d8d47032f16216e56f..688b5e0a49e323c619c5bb70666751e8fd261253 100644 --- a/sound/soc/intel/boards/bytcr_rt5651.c +++ b/sound/soc/intel/boards/bytcr_rt5651.c @@ -1009,7 +1009,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "Failed to get ext-amp-enable GPIO: %d\n", ret_val); - /* fall through */ + fallthrough; case -EPROBE_DEFER: put_device(codec_dev); return ret_val; @@ -1029,7 +1029,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "Failed to get hp-detect GPIO: %d\n", ret_val); - /* fall through */ + fallthrough; case -EPROBE_DEFER: put_device(codec_dev); return ret_val; diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c index 5dee55e9546bbd936260eaad2d7b986715e511d2..bbe8d782e0af6a1d4f6eed314775602e10d906ee 100644 --- a/sound/soc/intel/skylake/skl-pcm.c +++ b/sound/soc/intel/skylake/skl-pcm.c @@ -488,7 +488,7 @@ static int skl_pcm_trigger(struct snd_pcm_substream *substream, int cmd, stream->lpib); snd_hdac_ext_stream_set_lpib(stream, stream->lpib); } - /* fall through */ + fallthrough; case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c index 36df30915378cca26e80c93ef235f9ac669d1378..c8664ab80d45ad0b0774370221938a9d79f75620 100644 --- a/sound/soc/meson/axg-tdm-interface.c +++ b/sound/soc/meson/axg-tdm-interface.c @@ -58,17 +58,17 @@ int axg_tdm_set_tdm_slots(struct snd_soc_dai *dai, u32 *tx_mask, switch (slot_width) { case 0: slot_width = 32; - /* Fall-through */ + fallthrough; case 32: fmt |= SNDRV_PCM_FMTBIT_S32_LE; - /* Fall-through */ + fallthrough; case 24: fmt |= SNDRV_PCM_FMTBIT_S24_LE; fmt |= SNDRV_PCM_FMTBIT_S20_LE; - /* Fall-through */ + fallthrough; case 16: fmt |= SNDRV_PCM_FMTBIT_S16_LE; - /* Fall-through */ + fallthrough; case 8: fmt |= SNDRV_PCM_FMTBIT_S8; break; 
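The axg-tdm-interface hunk above shows why the conversion matters for deliberately cascading switches: a wider slot width admits every narrower sample format, so the cases accumulate format bits on the way down, and fallthrough; tells the compiler that each missing break is intentional. A self-contained sketch of the same accumulation pattern, using illustrative FMT_* masks rather than the real SNDRV_PCM_FMTBIT_* values:

#include <stdio.h>

#define FMT_S8	(1u << 0)
#define FMT_S16	(1u << 1)
#define FMT_S24	(1u << 2)
#define FMT_S32	(1u << 3)

/* Wider slots carry all narrower formats: accumulate by falling through. */
static unsigned int formats_for_slot_width(unsigned int slot_width)
{
	unsigned int fmt = 0;

	switch (slot_width) {
	case 32:
		fmt |= FMT_S32;
		__attribute__((__fallthrough__));
	case 24:
		fmt |= FMT_S24;
		__attribute__((__fallthrough__));
	case 16:
		fmt |= FMT_S16;
		__attribute__((__fallthrough__));
	case 8:
		fmt |= FMT_S8;
		break;
	default:
		return 0;	/* unsupported slot width */
	}
	return fmt;
}

int main(void)
{
	printf("32-bit slots -> mask 0x%x\n", formats_for_slot_width(32));	/* 0xf */
	printf("16-bit slots -> mask 0x%x\n", formats_for_slot_width(16));	/* 0x3 */
	return 0;
}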
@@ -133,7 +133,7 @@ static int axg_tdm_iface_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) case SND_SOC_DAIFMT_CBS_CFM: case SND_SOC_DAIFMT_CBM_CFS: dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n"); - /* Fall-through */ + fallthrough; default: return -EINVAL; } diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c index d1e09ade0190d67404cf796f12a6e1d0a0413cf2..c4e7307a44374ed6bd329d2aa6c6333dce3646a0 100644 --- a/sound/soc/pxa/pxa-ssp.c +++ b/sound/soc/pxa/pxa-ssp.c @@ -488,7 +488,7 @@ static int pxa_ssp_configure_dai_fmt(struct ssp_priv *priv) case SND_SOC_DAIFMT_DSP_A: sspsp |= SSPSP_FSRT; - /* fall through */ + fallthrough; case SND_SOC_DAIFMT_DSP_B: sscr0 |= SSCR0_MOD | SSCR0_PSP; sscr1 |= SSCR1_TRAIL | SSCR1_RWOT; diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c index 2a5302f1db98aa9b5ad1a8604c723a2f9c36c682..0168af84927270f50dbaac501e2a1e33f17d38e9 100644 --- a/sound/soc/qcom/qdsp6/q6afe-dai.c +++ b/sound/soc/qcom/qdsp6/q6afe-dai.c @@ -1150,206 +1150,206 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component, } static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = { - SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0), + SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), 
SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1", "Secondary MI2S Playback SD1", - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", 
NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL, - 0, 0, 0, 0), + 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL, - 0, 0, 0, 0), - SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, 0, 0, 0), + 
0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, SND_SOC_NOPM, 0, 0), }; static const struct snd_soc_component_driver q6afe_dai_component = { diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c index eaa95b5a7b6633dcd8262dea53be99617dfc8143..25d23e0266c7ebe1fa50d91f7d0664bc1b2b9a33 100644 --- a/sound/soc/qcom/qdsp6/q6routing.c +++ b/sound/soc/qcom/qdsp6/q6routing.c @@ -973,6 +973,20 @@ static int msm_routing_probe(struct snd_soc_component *c) return 0; } +static unsigned int q6routing_reg_read(struct snd_soc_component *component, + unsigned int reg) +{ + /* default value */ + return 0; +} + +static int q6routing_reg_write(struct snd_soc_component *component, + unsigned int reg, unsigned int val) +{ + /* dummy */ + return 0; +} + static const struct snd_soc_component_driver msm_soc_routing_component = { .probe = msm_routing_probe, .name = DRV_NAME, @@ -981,6 +995,8 @@ static const struct snd_soc_component_driver msm_soc_routing_component = { .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets), .dapm_routes = intercon, .num_dapm_routes = ARRAY_SIZE(intercon), + .read = q6routing_reg_read, + .write = q6routing_reg_write, }; static int q6pcm_routing_probe(struct platform_device *pdev) diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c index 1707414cfa9213c6ebd244c98550666dbff75330..5adb293d0435d150a28b18377b85860698440830 100644 --- a/sound/soc/rockchip/rockchip_pdm.c +++ b/sound/soc/rockchip/rockchip_pdm.c @@ -229,13 +229,13 @@ static int rockchip_pdm_hw_params(struct snd_pcm_substream *substream, switch (params_channels(params)) { case 8: val |= PDM_PATH3_EN; - /* fallthrough */ + fallthrough; case 6: val |= PDM_PATH2_EN; - /* fallthrough */ + fallthrough; case 4: val |= PDM_PATH1_EN; - /* fallthrough */ + fallthrough; case 2: val |= PDM_PATH0_EN; break; diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index 80ecb5c7fed0c1a03110a9a16c73d853ae154bc9..df53d4ea808fe57eed7537f73ae60a11f301d203 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c @@ -733,7 +733,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream, switch (params_channels(params)) { case 6: val |= MOD_DC2_EN; - /* Fall through */ + fallthrough; case 4: val |= MOD_DC1_EN; break; diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c index bd9de77c35f3eac318af8af78d1d394f91749d86..50fc7810723ec22802ebe026cdf976d3c5a068c5 100644 --- a/sound/soc/sh/siu_pcm.c +++ b/sound/soc/sh/siu_pcm.c @@ -198,9 +198,9 @@ static int siu_pcm_rd_set(struct siu_port *port_info, return 0; } -static void siu_io_tasklet(unsigned long data) +static void siu_io_tasklet(struct tasklet_struct *t) { - struct siu_stream *siu_stream = (struct siu_stream *)data; + struct siu_stream *siu_stream = from_tasklet(siu_stream, t, tasklet); struct snd_pcm_substream *substream = siu_stream->substream; struct device *dev = substream->pcm->card->dev; struct snd_pcm_runtime *rt = substream->runtime; @@ -520,10 +520,8 @@ static int siu_pcm_new(struct snd_soc_component *component, (*port_info)->pcm = pcm; /* IO tasklets */ - tasklet_init(&(*port_info)->playback.tasklet, siu_io_tasklet, - (unsigned long)&(*port_info)->playback); - tasklet_init(&(*port_info)->capture.tasklet, siu_io_tasklet, - (unsigned long)&(*port_info)->capture); + tasklet_setup(&(*port_info)->playback.tasklet, siu_io_tasklet); + tasklet_setup(&(*port_info)->capture.tasklet, siu_io_tasklet); } dev_info(card->dev, "SuperH SIU driver initialized.\n"); diff --git 
a/sound/soc/soc-component.c b/sound/soc/soc-component.c index f0b4f4bc44a45117c98d9f9e402c32c44c0c618d..5504b92946e3834745af08ff0b769d75909d3284 100644 --- a/sound/soc/soc-component.c +++ b/sound/soc/soc-component.c @@ -406,7 +406,7 @@ static unsigned int soc_component_read_no_lock( ret = -EIO; if (ret < 0) - soc_component_ret(component, ret); + return soc_component_ret(component, ret); return val; } diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 2fe1b2ec7c8fe6542330ff7036306283f1d19aa5..663e3839f25192ece98058cfb563c40861bb1031 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -618,7 +618,7 @@ int snd_soc_suspend(struct device *dev) "ASoC: idle_bias_off CODEC on over suspend\n"); break; } - /* fall through */ + fallthrough; case SND_SOC_BIAS_OFF: snd_soc_component_suspend(component); diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index cee99867131879620d292045b9837950e0c03a0d..5b60379237bffda36e2fdefb99de87e58195b683 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -1057,7 +1057,7 @@ static int soc_tplg_denum_create(struct soc_tplg *tplg, unsigned int count, ec->hdr.name); goto err_denum; } - /* fall through */ + fallthrough; case SND_SOC_TPLG_CTL_ENUM: case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE: case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT: @@ -1445,7 +1445,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create( ec->hdr.name); goto err_se; } - /* fall through */ + fallthrough; case SND_SOC_TPLG_CTL_ENUM: case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE: case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT: diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c index df1c6997cb4e28aea17266527d1ec85c0259a739..c6cb8c212eca59e84f3ff9e7092b339a8f5a6d8e 100644 --- a/sound/soc/sof/intel/hda-dai.c +++ b/sound/soc/sof/intel/hda-dai.c @@ -310,7 +310,7 @@ static int hda_link_pcm_trigger(struct snd_pcm_substream *substream, return ret; } - /* fallthrough */ + fallthrough; case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: snd_hdac_ext_link_stream_start(link_dev); @@ -333,7 +333,7 @@ static int hda_link_pcm_trigger(struct snd_pcm_substream *substream, link_dev->link_prepared = 0; - /* fallthrough */ + fallthrough; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: snd_hdac_ext_link_stream_clear(link_dev); break; diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c index d730e437e4ba84d2d447b5dfb06f47a58af21005..71c3f29057a71276596a0a04aa14efdc1f36a165 100644 --- a/sound/soc/sof/pcm.c +++ b/sound/soc/sof/pcm.c @@ -361,7 +361,7 @@ static int sof_pcm_trigger(struct snd_soc_component *component, return ret; } - /* fallthrough */ + fallthrough; case SNDRV_PCM_TRIGGER_START: if (spcm->stream[substream->stream].suspend_ignored) { /* @@ -386,7 +386,7 @@ static int sof_pcm_trigger(struct snd_soc_component *component, spcm->stream[substream->stream].suspend_ignored = true; return 0; } - /* fallthrough */ + fallthrough; case SNDRV_PCM_TRIGGER_STOP: stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP; ipc_first = true; diff --git a/sound/soc/tegra/tegra186_dspk.c b/sound/soc/tegra/tegra186_dspk.c index fe7117171a0e7a0441454efeef639c2b28498c84..0cbe31e2c7e9c4cc14a2d4ddc4fee6cd4acc648a 100644 --- a/sound/soc/tegra/tegra186_dspk.c +++ b/sound/soc/tegra/tegra186_dspk.c @@ -71,7 +71,7 @@ static int tegra186_dspk_put_control(struct snd_kcontrol *kcontrol, return 0; } -static int tegra186_dspk_runtime_suspend(struct device *dev) +static int __maybe_unused tegra186_dspk_runtime_suspend(struct device *dev) { struct tegra186_dspk *dspk = dev_get_drvdata(dev); 
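The /* fall through */ to fallthrough; conversions above (soc-core.c, soc-topology.c, hda-dai.c, sof/pcm.c) and in the ti/, zte/ and sh/ drivers further down all rely on the kernel's pseudo-keyword rather than a comment. A minimal sketch of the idiom, with the #define paraphrased from include/linux/compiler_attributes.h and channels_to_mask() as a made-up example; the attribute form is understood by both GCC and Clang under -Wimplicit-fallthrough, whereas comment annotations are compiler-specific heuristics that Clang ignores entirely:

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif

/* The pseudo-keyword is a statement, so it marks the fall-through at
 * the exact point where control drops into the next case label. */
static int channels_to_mask(int channels)
{
	int mask = 0;

	switch (channels) {
	case 4:
		mask |= 1 << 1;
		fallthrough;	/* 4-channel setups also enable path 0 */
	case 2:
		mask |= 1 << 0;
		break;
	default:
		return -1;
	}
	return mask;
}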
@@ -83,7 +83,7 @@ static int tegra186_dspk_runtime_suspend(struct device *dev) return 0; } -static int tegra186_dspk_runtime_resume(struct device *dev) +static int __maybe_unused tegra186_dspk_runtime_resume(struct device *dev) { struct tegra186_dspk *dspk = dev_get_drvdata(dev); int err; diff --git a/sound/soc/tegra/tegra210_admaif.c b/sound/soc/tegra/tegra210_admaif.c index 4894e8e6ee7fdae7a226c2b75c8fee269f383e9d..1268046b345d9f0d9c685754a48052781c86898d 100644 --- a/sound/soc/tegra/tegra210_admaif.c +++ b/sound/soc/tegra/tegra210_admaif.c @@ -219,7 +219,7 @@ static const struct regmap_config tegra186_admaif_regmap_config = { .cache_type = REGCACHE_FLAT, }; -static int tegra_admaif_runtime_suspend(struct device *dev) +static int __maybe_unused tegra_admaif_runtime_suspend(struct device *dev) { struct tegra_admaif *admaif = dev_get_drvdata(dev); @@ -229,7 +229,7 @@ static int tegra_admaif_runtime_suspend(struct device *dev) return 0; } -static int tegra_admaif_runtime_resume(struct device *dev) +static int __maybe_unused tegra_admaif_runtime_resume(struct device *dev) { struct tegra_admaif *admaif = dev_get_drvdata(dev); diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c index 5123a96fdde8f8f6a6d161cc9e198129ea595ecf..66287a7c9865dc6971c778f90a6f2e438b83d660 100644 --- a/sound/soc/tegra/tegra210_ahub.c +++ b/sound/soc/tegra/tegra210_ahub.c @@ -564,7 +564,7 @@ static const struct of_device_id tegra_ahub_of_match[] = { }; MODULE_DEVICE_TABLE(of, tegra_ahub_of_match); -static int tegra_ahub_runtime_suspend(struct device *dev) +static int __maybe_unused tegra_ahub_runtime_suspend(struct device *dev) { struct tegra_ahub *ahub = dev_get_drvdata(dev); @@ -576,7 +576,7 @@ static int tegra_ahub_runtime_suspend(struct device *dev) return 0; } -static int tegra_ahub_runtime_resume(struct device *dev) +static int __maybe_unused tegra_ahub_runtime_resume(struct device *dev) { struct tegra_ahub *ahub = dev_get_drvdata(dev); int err; diff --git a/sound/soc/tegra/tegra210_dmic.c b/sound/soc/tegra/tegra210_dmic.c index d682414ad90dc0b49594374834637fff3553a9ee..a661f40bc41c77121af07557beeabc51d5e61ce1 100644 --- a/sound/soc/tegra/tegra210_dmic.c +++ b/sound/soc/tegra/tegra210_dmic.c @@ -40,7 +40,7 @@ static const struct reg_default tegra210_dmic_reg_defaults[] = { { TEGRA210_DMIC_LP_BIQUAD_1_COEF_4, 0x0 }, }; -static int tegra210_dmic_runtime_suspend(struct device *dev) +static int __maybe_unused tegra210_dmic_runtime_suspend(struct device *dev) { struct tegra210_dmic *dmic = dev_get_drvdata(dev); @@ -52,7 +52,7 @@ static int tegra210_dmic_runtime_suspend(struct device *dev) return 0; } -static int tegra210_dmic_runtime_resume(struct device *dev) +static int __maybe_unused tegra210_dmic_runtime_resume(struct device *dev) { struct tegra210_dmic *dmic = dev_get_drvdata(dev); int err; diff --git a/sound/soc/tegra/tegra210_i2s.c b/sound/soc/tegra/tegra210_i2s.c index 722092181583e2f87f17ca67bbdf4fbc27b37348..a383bd5c51cd42e3b7e98b5ad8aa1ef9bdbb22ce 100644 --- a/sound/soc/tegra/tegra210_i2s.c +++ b/sound/soc/tegra/tegra210_i2s.c @@ -164,7 +164,7 @@ static int tegra210_i2s_init(struct snd_soc_dapm_widget *w, return tegra210_i2s_sw_reset(compnt, is_playback); } -static int tegra210_i2s_runtime_suspend(struct device *dev) +static int __maybe_unused tegra210_i2s_runtime_suspend(struct device *dev) { struct tegra210_i2s *i2s = dev_get_drvdata(dev); @@ -176,7 +176,7 @@ static int tegra210_i2s_runtime_suspend(struct device *dev) return 0; } -static int tegra210_i2s_runtime_resume(struct 
device *dev) +static int __maybe_unused tegra210_i2s_runtime_resume(struct device *dev) { struct tegra210_i2s *i2s = dev_get_drvdata(dev); int err; diff --git a/sound/soc/ti/davinci-i2s.c b/sound/soc/ti/davinci-i2s.c index d89b5c928c4d7b979d400454ce0694a94260f11d..dd34504c09ba8178ab5fff4882a3735df46181f6 100644 --- a/sound/soc/ti/davinci-i2s.c +++ b/sound/soc/ti/davinci-i2s.c @@ -289,7 +289,7 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, * rate is lowered. */ inv_fs = true; - /* fall through */ + fallthrough; case SND_SOC_DAIFMT_DSP_A: dev->mode = MOD_DSP_A; break; diff --git a/sound/soc/ti/n810.c b/sound/soc/ti/n810.c index 2802a33b9c5f1997a575a6001eb19dc366beb536..ed217b34f846ad160355ae1b1040479e3ab3f1d1 100644 --- a/sound/soc/ti/n810.c +++ b/sound/soc/ti/n810.c @@ -46,7 +46,7 @@ static void n810_ext_control(struct snd_soc_dapm_context *dapm) switch (n810_jack_func) { case N810_JACK_HS: line1l = 1; - /* fall through */ + fallthrough; case N810_JACK_HP: hp = 1; break; diff --git a/sound/soc/ti/omap-dmic.c b/sound/soc/ti/omap-dmic.c index 01abf1be5d788c0d602a7c0d486b33acbd9a55c3..a26588e9c3bc283985bf1fbb6db90baea41bd9b2 100644 --- a/sound/soc/ti/omap-dmic.c +++ b/sound/soc/ti/omap-dmic.c @@ -203,10 +203,10 @@ static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream, switch (channels) { case 6: dmic->ch_enabled |= OMAP_DMIC_UP3_ENABLE; - /* fall through */ + fallthrough; case 4: dmic->ch_enabled |= OMAP_DMIC_UP2_ENABLE; - /* fall through */ + fallthrough; case 2: dmic->ch_enabled |= OMAP_DMIC_UP1_ENABLE; break; diff --git a/sound/soc/ti/omap-mcpdm.c b/sound/soc/ti/omap-mcpdm.c index d482b62f314a60dc6cf7183098f247a3f8e7333d..fafb2998ad0df27f00d5fc4ac9d1ec07107e3a40 100644 --- a/sound/soc/ti/omap-mcpdm.c +++ b/sound/soc/ti/omap-mcpdm.c @@ -309,19 +309,19 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream, /* up to 3 channels for capture */ return -EINVAL; link_mask |= 1 << 4; - /* fall through */ + fallthrough; case 4: if (stream == SNDRV_PCM_STREAM_CAPTURE) /* up to 3 channels for capture */ return -EINVAL; link_mask |= 1 << 3; - /* fall through */ + fallthrough; case 3: link_mask |= 1 << 2; - /* fall through */ + fallthrough; case 2: link_mask |= 1 << 1; - /* fall through */ + fallthrough; case 1: link_mask |= 1 << 0; break; diff --git a/sound/soc/ti/rx51.c b/sound/soc/ti/rx51.c index 2176a95201bf8b1341ca06102b49d2010d764f68..a2629ccc1dc843abe1b442f0a36ddfe05dd56a73 100644 --- a/sound/soc/ti/rx51.c +++ b/sound/soc/ti/rx51.c @@ -55,7 +55,7 @@ static void rx51_ext_control(struct snd_soc_dapm_context *dapm) break; case RX51_JACK_HS: hs = 1; - /* fall through */ + fallthrough; case RX51_JACK_HP: hp = 1; break; diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c index 4b1cd4da3e368e783f49da8e5b9d05283470b5fa..939b33ec39f50e7623906685bb9e7b0037105079 100644 --- a/sound/soc/txx9/txx9aclc.c +++ b/sound/soc/txx9/txx9aclc.c @@ -134,9 +134,9 @@ txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr) #define NR_DMA_CHAIN 2 -static void txx9aclc_dma_tasklet(unsigned long data) +static void txx9aclc_dma_tasklet(struct tasklet_struct *t) { - struct txx9aclc_dmadata *dmadata = (struct txx9aclc_dmadata *)data; + struct txx9aclc_dmadata *dmadata = from_tasklet(dmadata, t, tasklet); struct dma_chan *chan = dmadata->dma_chan; struct dma_async_tx_descriptor *desc; struct snd_pcm_substream *substream = dmadata->substream; @@ -352,8 +352,7 @@ static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev, "playback" : 
"capture"); return -EBUSY; } - tasklet_init(&dmadata->tasklet, txx9aclc_dma_tasklet, - (unsigned long)dmadata); + tasklet_setup(&dmadata->tasklet, txx9aclc_dma_tasklet); return 0; } diff --git a/sound/soc/zte/zx-i2s.c b/sound/soc/zte/zx-i2s.c index 568cde64ff8b81a0b82d7f4a5c2350051ca3b7b2..1c1a44e08a676d6ec3602b3f90eb0f897bad0e9d 100644 --- a/sound/soc/zte/zx-i2s.c +++ b/sound/soc/zte/zx-i2s.c @@ -294,7 +294,7 @@ static int zx_i2s_trigger(struct snd_pcm_substream *substream, int cmd, zx_i2s_rx_dma_en(zx_i2s->reg_base, true); else zx_i2s_tx_dma_en(zx_i2s->reg_base, true); - /* fall thru */ + fallthrough; case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (capture) @@ -308,7 +308,7 @@ static int zx_i2s_trigger(struct snd_pcm_substream *substream, int cmd, zx_i2s_rx_dma_en(zx_i2s->reg_base, false); else zx_i2s_tx_dma_en(zx_i2s->reg_base, false); - /* fall thru */ + fallthrough; case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (capture) diff --git a/sound/soc/zte/zx-spdif.c b/sound/soc/zte/zx-spdif.c index a3a07c0730e691eae7cc9de3b0caf7b597f7f817..b4168bd532b7498be1429960d645ec3fc84aee87 100644 --- a/sound/soc/zte/zx-spdif.c +++ b/sound/soc/zte/zx-spdif.c @@ -218,7 +218,7 @@ static int zx_spdif_trigger(struct snd_pcm_substream *substream, int cmd, val = readl_relaxed(zx_spdif->reg_base + ZX_FIFOCTRL); val |= ZX_FIFOCTRL_TX_FIFO_RST; writel_relaxed(val, zx_spdif->reg_base + ZX_FIFOCTRL); - /* fall thru */ + fallthrough; case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: zx_spdif_cfg_tx(zx_spdif->reg_base, true); diff --git a/sound/usb/midi.c b/sound/usb/midi.c index df639fe031181561290ca8f2295dac1c98e78d9e..e8287a05e36bc492922492544fa90846d5e97fa6 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -344,10 +344,9 @@ static void snd_usbmidi_do_output(struct snd_usb_midi_out_endpoint *ep) spin_unlock_irqrestore(&ep->buffer_lock, flags); } -static void snd_usbmidi_out_tasklet(unsigned long data) +static void snd_usbmidi_out_tasklet(struct tasklet_struct *t) { - struct snd_usb_midi_out_endpoint *ep = - (struct snd_usb_midi_out_endpoint *) data; + struct snd_usb_midi_out_endpoint *ep = from_tasklet(ep, t, tasklet); snd_usbmidi_do_output(ep); } @@ -1441,7 +1440,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi, } spin_lock_init(&ep->buffer_lock); - tasklet_init(&ep->tasklet, snd_usbmidi_out_tasklet, (unsigned long)ep); + tasklet_setup(&ep->tasklet, snd_usbmidi_out_tasklet); init_waitqueue_head(&ep->drain_wait); for (i = 0; i < 0x10; ++i) diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c index 884e740a785c55af2157ac97cb47ee83e884e39e..3b2dce1043f5a66d85212f68054fefb20e1ffd5a 100644 --- a/sound/usb/misc/ua101.c +++ b/sound/usb/misc/ua101.c @@ -247,9 +247,9 @@ static inline void add_with_wraparound(struct ua101 *ua, *value -= ua->playback.queue_length; } -static void playback_tasklet(unsigned long data) +static void playback_tasklet(struct tasklet_struct *t) { - struct ua101 *ua = (void *)data; + struct ua101 *ua = from_tasklet(ua, t, playback_tasklet); unsigned long flags; unsigned int frames; struct ua101_urb *urb; @@ -1218,8 +1218,7 @@ static int ua101_probe(struct usb_interface *interface, spin_lock_init(&ua->lock); mutex_init(&ua->mutex); INIT_LIST_HEAD(&ua->ready_playback_urbs); - tasklet_init(&ua->playback_tasklet, - playback_tasklet, (unsigned long)ua); + tasklet_setup(&ua->playback_tasklet, playback_tasklet); init_waitqueue_head(&ua->alsa_capture_wait); 
init_waitqueue_head(&ua->rate_feedback_wait); init_waitqueue_head(&ua->alsa_playback_wait); diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 6b0f3a8469efee2ffb1cf472bb5c0c806587cd2e..81e987eaf063757cc0bb8ac6e6e3a298e0405645 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -2371,7 +2371,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, int num_ins; struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; - int i, err, nameid, type, len; + int i, err, nameid, type, len, val; const struct procunit_info *info; const struct procunit_value_info *valinfo; const struct usbmix_name_map *map; @@ -2474,6 +2474,12 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, break; } + err = get_cur_ctl_value(cval, cval->control << 8, &val); + if (err < 0) { + usb_mixer_elem_info_free(cval); + return -EINVAL; + } + kctl = snd_ctl_new1(&mixer_procunit_ctl, cval); if (!kctl) { usb_mixer_elem_info_free(cval); diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 5600751803cfe1bd322b47d3b2c632468f8ba001..b401ee894e1bb6978b595c750d12254e7b775618 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -369,11 +369,13 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, case USB_ID(0x07fd, 0x0008): /* MOTU M Series */ case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */ case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */ + case USB_ID(0x0499, 0x172f): /* Steinberg UR22C */ case USB_ID(0x0d9a, 0x00df): /* RTX6001 */ ep = 0x81; ifnum = 2; goto add_sync_ep_from_ifnum; case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */ + case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */ ep = 0x82; ifnum = 0; goto add_sync_ep_from_ifnum; diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index d79e3ddc56901809e932bed426475ff7f103b235..23eafd50126f31221d4c7e98cb8fa07168d42ed4 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2678,6 +2678,10 @@ YAMAHA_DEVICE(0x7010, "UB99"), .ifnum = QUIRK_ANY_INTERFACE, .type = QUIRK_COMPOSITE, .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_STANDARD_MIXER, + }, { .ifnum = 0, .type = QUIRK_AUDIO_FIXED_ENDPOINT, @@ -2690,6 +2694,32 @@ YAMAHA_DEVICE(0x7010, "UB99"), .attributes = UAC_EP_CS_ATTR_SAMPLE_RATE, .endpoint = 0x01, .ep_attr = USB_ENDPOINT_XFER_ISOC, + .datainterval = 1, + .maxpacksize = 0x024c, + .rates = SNDRV_PCM_RATE_44100 | + SNDRV_PCM_RATE_48000, + .rate_min = 44100, + .rate_max = 48000, + .nr_rates = 2, + .rate_table = (unsigned int[]) { + 44100, 48000 + } + } + }, + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 2, + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .attributes = 0, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC, + .datainterval = 1, + .maxpacksize = 0x0126, .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000, .rate_min = 44100, @@ -2797,14 +2827,24 @@ YAMAHA_DEVICE(0x7010, "UB99"), /* Lenovo ThinkStation P620 Rear Line-in, Line-out and Microphone */ { USB_DEVICE(0x17aa, 0x1046), - QUIRK_DEVICE_PROFILE("Lenovo", "ThinkStation P620 Rear", - "Lenovo-ThinkStation-P620-Rear"), + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { + .vendor_name = "Lenovo", + .product_name = "ThinkStation P620 Rear", + .profile_name = "Lenovo-ThinkStation-P620-Rear", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_SETUP_DISABLE_AUTOSUSPEND + } }, /* Lenovo ThinkStation P620 Internal 
Speaker + Front Headset */
{
	USB_DEVICE(0x17aa, 0x104d),
-	QUIRK_DEVICE_PROFILE("Lenovo", "ThinkStation P620 Main",
-			     "Lenovo-ThinkStation-P620-Main"),
+	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+		.vendor_name = "Lenovo",
+		.product_name = "ThinkStation P620 Main",
+		.profile_name = "Lenovo-ThinkStation-P620-Main",
+		.ifnum = QUIRK_ANY_INTERFACE,
+		.type = QUIRK_SETUP_DISABLE_AUTOSUSPEND
+	}
},

/* Native Instruments MK2 series */
@@ -3519,14 +3559,40 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
{
	/*
	 * Pioneer DJ DJM-250MK2
-	 * PCM is 8 channels out @ 48 fixed (endpoints 0x01).
-	 * The output from computer to the mixer is usable.
+	 * PCM is 8 channels out @ 48 fixed (endpoint 0x01)
+	 * and 8 channels in @ 48 fixed (endpoint 0x82).
+	 *
+	 * Both playback and recording work, even simultaneously.
+	 *
+	 * Playback channels can be mapped to:
+	 * - CH1
+	 * - CH2
+	 * - AUX
	 *
-	 * The input (phono or line to computer) is not working.
-	 * It should be at endpoint 0x82 and probably also 8 channels,
-	 * but it seems that it works only with Pioneer proprietary software.
-	 * Even on officially supported OS, the Audacity was unable to record
-	 * and Mixxx to recognize the control vinyls.
+	 * Recording channels can be mapped to:
+	 * - Post CH1 Fader
+	 * - Post CH2 Fader
+	 * - Cross Fader A
+	 * - Cross Fader B
+	 * - MIC
+	 * - AUX
+	 * - REC OUT
+	 *
+	 * There is a remaining problem with recording directly from PHONO/LINE.
+	 * If we map a channel to:
+	 * - CH1 Control Tone PHONO
+	 * - CH1 Control Tone LINE
+	 * - CH2 Control Tone PHONO
+	 * - CH2 Control Tone LINE
+	 * it is silent.
+	 * There is no signal even on other operating systems with official drivers.
+	 * The signal appears only when a supported application is started.
+	 * This still needs to be investigated
+	 * (there is quite a lot of USB communication in both directions).
+	 *
+	 * In the current version this mixer can be used for playback
+	 * and for recording from vinyl (through the Post CH* Faders),
+	 * but not for DVS (Digital Vinyl Systems) such as Mixxx.
	 */
	USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0017),
	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
@@ -3550,6 +3616,26 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
				.rate_max = 48000,
				.nr_rates = 1,
				.rate_table = (unsigned int[]) { 48000 }
+			}
+		},
+		{
+			.ifnum = 0,
+			.type = QUIRK_AUDIO_FIXED_ENDPOINT,
+			.data = &(const struct audioformat) {
+				.formats = SNDRV_PCM_FMTBIT_S24_3LE,
+				.channels = 8, // inputs
+				.iface = 0,
+				.altsetting = 1,
+				.altset_idx = 1,
+				.endpoint = 0x82,
+				.ep_attr = USB_ENDPOINT_XFER_ISOC|
+					USB_ENDPOINT_SYNC_ASYNC|
+					USB_ENDPOINT_USAGE_IMPLICIT_FB,
+				.rates = SNDRV_PCM_RATE_48000,
+				.rate_min = 48000,
+				.rate_max = 48000,
+				.nr_rates = 1,
+				.rate_table = (unsigned int[]) { 48000 }
			}
		},
		{
@@ -3714,8 +3800,8 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
 * they pretend to be 96kHz mono as a workaround for stereo being broken
 * by that...
 *
- * They also have swapped L-R channels, but that's for userspace to deal
- * with.
+ * They also have an issue with initial stream alignment that causes the
+ * channels to be swapped and out of phase, which is dealt with in quirks.c.
*/ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index abf99b814a0f819a7d1d3ecd6cf268e19c9082d9..75bbdc6912433796eeea34d9cd5647de59c45576 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -518,6 +518,15 @@ static int setup_fmt_after_resume_quirk(struct snd_usb_audio *chip, return 1; /* Continue with creating streams and mixer */ } +static int setup_disable_autosuspend(struct snd_usb_audio *chip, + struct usb_interface *iface, + struct usb_driver *driver, + const struct snd_usb_audio_quirk *quirk) +{ + driver->supports_autosuspend = 0; + return 1; /* Continue with creating streams and mixer */ +} + /* * audio-interface quirks * @@ -557,6 +566,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip, [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk, [QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk, [QUIRK_SETUP_FMT_AFTER_RESUME] = setup_fmt_after_resume_quirk, + [QUIRK_SETUP_DISABLE_AUTOSUSPEND] = setup_disable_autosuspend, }; if (quirk->type < QUIRK_TYPE_COUNT) { @@ -1493,6 +1503,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, set_format_emu_quirk(subs, fmt); break; case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */ + case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */ pioneer_djm_set_format_quirk(subs); break; case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */ diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index b91c4c0807eca41fc22753c0cfb6ce3e60b975f8..6839915a01286bb6f9f573055cd91d19eae922a7 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -102,6 +102,7 @@ enum quirk_type { QUIRK_AUDIO_ALIGN_TRANSFER, QUIRK_AUDIO_STANDARD_MIXER, QUIRK_SETUP_FMT_AFTER_RESUME, + QUIRK_SETUP_DISABLE_AUTOSUSPEND, QUIRK_TYPE_COUNT }; diff --git a/sound/x86/Kconfig b/sound/x86/Kconfig index 77777192f6508b6f0dfae842d1f4d97c26d1b720..4ffcc5e623c2225bdda15c688d932bd5b5957623 100644 --- a/sound/x86/Kconfig +++ b/sound/x86/Kconfig @@ -9,7 +9,7 @@ menuconfig SND_X86 if SND_X86 config HDMI_LPE_AUDIO - tristate "HDMI audio without HDaudio on Intel Atom platforms" + tristate "HDMI audio without HDAudio on Intel Atom platforms" depends on DRM_I915 select SND_PCM help diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index ede162f83eea0de3e4c929c50cc1a3a379ee238d..0e9310727281af3f0f8b58d1f6e76f2866548047 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -67,7 +67,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d, if (!info->btf_id || !info->nr_func_info || btf__get_from_id(info->btf_id, &prog_btf)) goto print; - finfo = (struct bpf_func_info *)info->func_info; + finfo = u64_to_ptr(info->func_info); func_type = btf__type_by_id(prog_btf, finfo->type_id); if (!func_type || !btf_is_func(func_type)) goto print; diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 8a4c2b3b0cd63ef13bec9cfb4f5573933788cc9b..f6118465363360d50fcdc37d5f2842a9f8204f75 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -143,6 +143,20 @@ static int codegen_datasec_def(struct bpf_object *obj, var_name, align); return -EINVAL; } + /* Assume 32-bit architectures when generating data section + * struct memory layout. Given bpftool can't know which target + * host architecture it's emitting skeleton for, we need to be + * conservative and assume 32-bit one to ensure enough padding + * bytes are generated for pointer and long types. 
This will + * still work correctly for 64-bit architectures, because in + * the worst case we'll generate unnecessary padding field, + * which on 64-bit architectures is not strictly necessary and + * would be handled by natural 8-byte alignment. But it still + * will be a correct memory layout, based on recorded offsets + * in BTF. + */ + if (align > 4) + align = 4; align_off = (off + align - 1) / align * align; if (align_off != need_off) { @@ -397,7 +411,7 @@ static int do_skeleton(int argc, char **argv) { \n\ struct %1$s *obj; \n\ \n\ - obj = (typeof(obj))calloc(1, sizeof(*obj)); \n\ + obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\ if (!obj) \n\ return NULL; \n\ if (%1$s__create_skeleton(obj)) \n\ @@ -461,7 +475,7 @@ static int do_skeleton(int argc, char **argv) { \n\ struct bpf_object_skeleton *s; \n\ \n\ - s = (typeof(s))calloc(1, sizeof(*s)); \n\ + s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\ if (!s) \n\ return -1; \n\ obj->skeleton = s; \n\ @@ -479,7 +493,7 @@ static int do_skeleton(int argc, char **argv) /* maps */ \n\ s->map_cnt = %zu; \n\ s->map_skel_sz = sizeof(*s->maps); \n\ - s->maps = (typeof(s->maps))calloc(s->map_cnt, s->map_skel_sz);\n\ + s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\ if (!s->maps) \n\ goto err; \n\ ", @@ -515,7 +529,7 @@ static int do_skeleton(int argc, char **argv) /* programs */ \n\ s->prog_cnt = %zu; \n\ s->prog_skel_sz = sizeof(*s->progs); \n\ - s->progs = (typeof(s->progs))calloc(s->prog_cnt, s->prog_skel_sz);\n\ + s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\ if (!s->progs) \n\ goto err; \n\ ", diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c index 1b793759170e84e4b820073cfbbdf889f52eacdb..a89f09e3c84855a0623c66ccceccfa9048ecc682 100644 --- a/tools/bpf/bpftool/link.c +++ b/tools/bpf/bpftool/link.c @@ -106,7 +106,7 @@ static int show_link_close_json(int fd, struct bpf_link_info *info) switch (info->type) { case BPF_LINK_TYPE_RAW_TRACEPOINT: jsonw_string_field(json_wtr, "tp_name", - (const char *)info->raw_tracepoint.tp_name); + u64_to_ptr(info->raw_tracepoint.tp_name)); break; case BPF_LINK_TYPE_TRACING: err = get_prog_info(info->prog_id, &prog_info); @@ -185,7 +185,7 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info) switch (info->type) { case BPF_LINK_TYPE_RAW_TRACEPOINT: printf("\n\ttp '%s' ", - (const char *)info->raw_tracepoint.tp_name); + (const char *)u64_to_ptr(info->raw_tracepoint.tp_name)); break; case BPF_LINK_TYPE_TRACING: err = get_prog_info(info->prog_id, &prog_info); diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index e3a79b5a996034b064133b9b6587e6e089d058b6..c46e52137b87e800603a478ab9e37eecc46484d1 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -21,7 +21,15 @@ /* Make sure we do not use kernel-only integer typedefs */ #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 -#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr)) +static inline __u64 ptr_to_u64(const void *ptr) +{ + return (__u64)(unsigned long)ptr; +} + +static inline void *u64_to_ptr(__u64 ptr) +{ + return (void *)(unsigned long)ptr; +} #define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); }) #define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); }) diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c index e3b116325403bea81a56a03e58f06b8206440dce..df7d8ec7603616ac915f764b663e65fbeb579cb0 100644 --- a/tools/bpf/bpftool/pids.c +++ b/tools/bpf/bpftool/pids.c @@ -134,6 +134,8 @@ int 
build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
 	while (true) {
 		ret = read(fd, buf, sizeof(buf));
 		if (ret < 0) {
+			if (errno == EAGAIN)
+				continue;
 			err = -errno;
 			p_err("failed to read PID iterator output: %d", err);
 			goto out;
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 158995d853b04360a059586356f1621056637e94..d393eb8263a6042c10bac1d60e37a1da6b107495 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -428,14 +428,14 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 			p_info("no instructions returned");
 			return -1;
 		}
-		buf = (unsigned char *)(info->jited_prog_insns);
+		buf = u64_to_ptr(info->jited_prog_insns);
 		member_len = info->jited_prog_len;
 	} else {	/* DUMP_XLATED */
 		if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
 			p_err("error retrieving insn dump: kernel.kptr_restrict set?");
 			return -1;
 		}
-		buf = (unsigned char *)info->xlated_prog_insns;
+		buf = u64_to_ptr(info->xlated_prog_insns);
 		member_len = info->xlated_prog_len;
 	}
@@ -444,7 +444,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 		return -1;
 	}
-	func_info = (void *)info->func_info;
+	func_info = u64_to_ptr(info->func_info);
 	if (info->nr_line_info) {
 		prog_linfo = bpf_prog_linfo__new(info);
@@ -462,7 +462,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 		n = write(fd, buf, member_len);
 		close(fd);
-		if (n != member_len) {
+		if (n != (ssize_t)member_len) {
 			p_err("error writing output file: %s",
 			      n < 0 ? strerror(errno) : "short write");
 			return -1;
@@ -492,13 +492,13 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 		__u32 i;
 		if (info->nr_jited_ksyms) {
 			kernel_syms_load(&dd);
-			ksyms = (__u64 *) info->jited_ksyms;
+			ksyms = u64_to_ptr(info->jited_ksyms);
 		}
 		if (json_output)
 			jsonw_start_array(json_wtr);
-		lens = (__u32 *) info->jited_func_lens;
+		lens = u64_to_ptr(info->jited_func_lens);
 		for (i = 0; i < info->nr_jited_func_lens; i++) {
 			if (ksyms) {
 				sym = kernel_syms_search(&dd, ksyms[i]);
@@ -559,7 +559,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 	} else {
 		kernel_syms_load(&dd);
 		dd.nr_jited_ksyms = info->nr_jited_ksyms;
-		dd.jited_ksyms = (__u64 *) info->jited_ksyms;
+		dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
 		dd.btf = btf;
 		dd.func_info = func_info;
 		dd.finfo_rec_size = info->func_info_rec_size;
@@ -1681,7 +1681,7 @@ static char *profile_target_name(int tgt_fd)
 		goto out;
 	}
-	func_info = (struct bpf_func_info *)(info_linear->info.func_info);
+	func_info = u64_to_ptr(info_linear->info.func_info);
 	t = btf__type_by_id(btf, func_info[0].type_id);
 	if (!t) {
 		p_err("btf %d doesn't have type %d",
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index 4d9ecb97586224d645626a32365647549ef36f2a..0def0bb1f78327ce3e4a201d868b2a9e03014960 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -233,6 +233,39 @@ static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
 	return btf_id__add(root, id, false);
 }
+/*
+ * The data of a compressed section should be aligned to 4
+ * (for 32-bit) or 8 (for 64-bit) bytes. The binutils ld
+ * sets sh_addralign to 1, which makes libelf fail with a
+ * misaligned-section error during the update:
+ *   FAILED elf_update(WRITE): invalid section alignment
+ *
+ * While waiting for an ld fix, we fix the compressed sections'
+ * sh_addralign value manually.
+ */
+static int compressed_section_fix(Elf *elf, Elf_Scn *scn, GElf_Shdr *sh)
+{
+	int expected = gelf_getclass(elf) == ELFCLASS32 ?
4 : 8; + + if (!(sh->sh_flags & SHF_COMPRESSED)) + return 0; + + if (sh->sh_addralign == expected) + return 0; + + pr_debug2(" - fixing wrong alignment sh_addralign %u, expected %u\n", + sh->sh_addralign, expected); + + sh->sh_addralign = expected; + + if (gelf_update_shdr(scn, sh) == 0) { + printf("FAILED cannot update section header: %s\n", + elf_errmsg(-1)); + return -1; + } + return 0; +} + static int elf_collect(struct object *obj) { Elf_Scn *scn = NULL; @@ -309,6 +342,9 @@ static int elf_collect(struct object *obj) obj->efile.idlist_shndx = idx; obj->efile.idlist_addr = sh.sh_addr; } + + if (compressed_section_fix(elf, scn, &sh)) + return -1; } return 0; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 0480f893facd27779648f3331a27a5c1db537722..b6238b2209b71a3d9094b7eec1eb67130175ed62 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -767,7 +767,7 @@ union bpf_attr { * * Also, note that **bpf_trace_printk**\ () is slow, and should * only be used for debugging purposes. For this reason, a notice - * bloc (spanning several lines) is printed to kernel logs and + * block (spanning several lines) is printed to kernel logs and * states that the helper should not be used "for production use" * the first time this helper is used (or more precisely, when * **trace_printk**\ () buffers are allocated). For passing values @@ -1033,14 +1033,14 @@ union bpf_attr { * * int ret; * struct bpf_tunnel_key key = {}; - * + * * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); * if (ret < 0) * return TC_ACT_SHOT; // drop packet - * + * * if (key.remote_ipv4 != 0x0a000001) * return TC_ACT_SHOT; // drop packet - * + * * return TC_ACT_OK; // accept packet * * This interface can also be used with all encapsulation devices @@ -1147,7 +1147,7 @@ union bpf_attr { * Description * Retrieve the realm or the route, that is to say the * **tclassid** field of the destination for the *skb*. The - * indentifier retrieved is a user-provided tag, similar to the + * identifier retrieved is a user-provided tag, similar to the * one used with the net_cls cgroup (see description for * **bpf_get_cgroup_classid**\ () helper), but here this tag is * held by a route (a destination entry), not by a task. 
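The bpftool changes above replace ad-hoc casts of kernel-supplied addresses with two small helpers added to tools/bpf/bpftool/main.h. Kernel UAPI structs such as bpf_prog_info carry pointers as __u64 so the layout is identical for 32- and 64-bit userspace; the round trip has to go through unsigned long, because a direct (void *) cast of a __u64 truncates and triggers -Wint-to-pointer-cast on 32-bit hosts. Unlike the old #define, the inline functions also type-check their argument:

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static inline void *u64_to_ptr(__u64 ptr)
{
	return (void *)(unsigned long)ptr;
}

/* usage, as in the prog.c hunks above:
 *	struct bpf_func_info *func_info = u64_to_ptr(info->func_info);
 */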
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 077e7ee69e3d8cb22a671817eececec4a5db7637..3e5dcdd48a4998ddc3b59ee77b376d909e345e84 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -1196,7 +1196,7 @@ union perf_mem_data_src { #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ /* 1 free */ -#define PERF_MEM_SNOOPX_SHIFT 37 +#define PERF_MEM_SNOOPX_SHIFT 38 /* locked instruction */ #define PERF_MEM_LOCK_NA 0x01 /* not available */ diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index bc14db706b88a83fd03f069cc5bf74e08eec8e25..e9a4ecddb7a52400a6502957263509637b3cada2 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -40,7 +40,7 @@ * Helper macro to manipulate data structures */ #ifndef offsetof -#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER) +#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER) #endif #ifndef container_of #define container_of(ptr, type, member) \ diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 4843e44916f7c454939ba0c32b174ac3ab077efa..7dfca7016aaa8d9597e0708592dfa6d410d9e3a5 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -41,6 +41,7 @@ struct btf { __u32 types_size; __u32 data_size; int fd; + int ptr_sz; }; static inline __u64 ptr_to_u64(const void *ptr) @@ -221,6 +222,70 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) return btf->types[type_id]; } +static int determine_ptr_size(const struct btf *btf) +{ + const struct btf_type *t; + const char *name; + int i; + + for (i = 1; i <= btf->nr_types; i++) { + t = btf__type_by_id(btf, i); + if (!btf_is_int(t)) + continue; + + name = btf__name_by_offset(btf, t->name_off); + if (!name) + continue; + + if (strcmp(name, "long int") == 0 || + strcmp(name, "long unsigned int") == 0) { + if (t->size != 4 && t->size != 8) + continue; + return t->size; + } + } + + return -1; +} + +static size_t btf_ptr_sz(const struct btf *btf) +{ + if (!btf->ptr_sz) + ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf); + return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz; +} + +/* Return pointer size this BTF instance assumes. The size is heuristically + * determined by looking for 'long' or 'unsigned long' integer type and + * recording its size in bytes. If BTF type information doesn't have any such + * type, this function returns 0. In the latter case, native architecture's + * pointer size is assumed, so will be either 4 or 8, depending on + * architecture that libbpf was compiled for. It's possible to override + * guessed value by using btf__set_pointer_size() API. + */ +size_t btf__pointer_size(const struct btf *btf) +{ + if (!btf->ptr_sz) + ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf); + + if (btf->ptr_sz < 0) + /* not enough BTF type info to guess */ + return 0; + + return btf->ptr_sz; +} + +/* Override or set pointer size in bytes. Only values of 4 and 8 are + * supported. 
+ */ +int btf__set_pointer_size(struct btf *btf, size_t ptr_sz) +{ + if (ptr_sz != 4 && ptr_sz != 8) + return -EINVAL; + btf->ptr_sz = ptr_sz; + return 0; +} + static bool btf_type_is_void(const struct btf_type *t) { return t == &btf_void || btf_is_fwd(t); @@ -253,7 +318,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) size = t->size; goto done; case BTF_KIND_PTR: - size = sizeof(void *); + size = btf_ptr_sz(btf); goto done; case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: @@ -293,9 +358,9 @@ int btf__align_of(const struct btf *btf, __u32 id) switch (kind) { case BTF_KIND_INT: case BTF_KIND_ENUM: - return min(sizeof(void *), (size_t)t->size); + return min(btf_ptr_sz(btf), (size_t)t->size); case BTF_KIND_PTR: - return sizeof(void *); + return btf_ptr_sz(btf); case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: case BTF_KIND_CONST: @@ -533,6 +598,18 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) if (IS_ERR(btf)) goto done; + switch (gelf_getclass(elf)) { + case ELFCLASS32: + btf__set_pointer_size(btf, 4); + break; + case ELFCLASS64: + btf__set_pointer_size(btf, 8); + break; + default: + pr_warn("failed to get ELF class (bitness) for %s\n", path); + break; + } + if (btf_ext && btf_ext_data) { *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index f4a1a1d2b9a3fc7c4c50a7fb2b6cf66cbf26a75c..1ca14448df4cd02bfd7be8a369ae0f02cb464ae6 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -76,6 +76,8 @@ LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf, LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf); LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id); +LIBBPF_API size_t btf__pointer_size(const struct btf *btf); +LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz); LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id); diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index cf711168d34a2c836634ec94093d0bf06b0d3998..57c00fa639329084d1d7ba060be1d59cf35d40ac 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "btf.h" #include "hashmap.h" #include "libbpf.h" @@ -60,6 +61,7 @@ struct btf_dump { const struct btf_ext *btf_ext; btf_dump_printf_fn_t printf_fn; struct btf_dump_opts opts; + int ptr_sz; bool strip_mods; /* per-type auxiliary state */ @@ -138,6 +140,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf, d->btf_ext = btf_ext; d->printf_fn = printf_fn; d->opts.ctx = opts ? opts->ctx : NULL; + d->ptr_sz = btf__pointer_size(btf) ? 
: sizeof(void *); d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL); if (IS_ERR(d->type_names)) { @@ -549,6 +552,9 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr) } } +static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id, + const struct btf_type *t); + static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id, const struct btf_type *t); static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id, @@ -671,6 +677,9 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id) switch (kind) { case BTF_KIND_INT: + /* Emit type alias definitions if necessary */ + btf_dump_emit_missing_aliases(d, id, t); + tstate->emit_state = EMITTED; break; case BTF_KIND_ENUM: @@ -797,7 +806,7 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d, int align, int lvl) { int off_diff = m_off - cur_off; - int ptr_bits = sizeof(void *) * 8; + int ptr_bits = d->ptr_sz * 8; if (off_diff <= 0) /* no gap */ @@ -870,7 +879,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d, btf_dump_printf(d, ": %d", m_sz); off = m_off + m_sz; } else { - m_sz = max(0, btf__resolve_size(d->btf, m->type)); + m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type)); off = m_off + m_sz * 8; } btf_dump_printf(d, ";"); @@ -890,6 +899,32 @@ static void btf_dump_emit_struct_def(struct btf_dump *d, btf_dump_printf(d, " __attribute__((packed))"); } +static const char *missing_base_types[][2] = { + /* + * GCC emits typedefs to its internal __PolyX_t types when compiling Arm + * SIMD intrinsics. Alias them to standard base types. + */ + { "__Poly8_t", "unsigned char" }, + { "__Poly16_t", "unsigned short" }, + { "__Poly64_t", "unsigned long long" }, + { "__Poly128_t", "unsigned __int128" }, +}; + +static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id, + const struct btf_type *t) +{ + const char *name = btf_dump_type_name(d, id); + int i; + + for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) { + if (strcmp(name, missing_base_types[i][0]) == 0) { + btf_dump_printf(d, "typedef %s %s;\n\n", + missing_base_types[i][1], name); + break; + } + } +} + static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id, const struct btf_type *t) { diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 0a06124f7999ad4484c9b4dc3c7c8ab6467e462f..0ad0b0491e1f2a720bf1a78890242b9b7373d064 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2264,7 +2264,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict, data = elf_getdata(scn, NULL); if (!scn || !data) { pr_warn("failed to get Elf_Data from map section %d (%s)\n", - obj->efile.maps_shndx, MAPS_ELF_SEC); + obj->efile.btf_maps_shndx, MAPS_ELF_SEC); return -EINVAL; } @@ -2434,6 +2434,8 @@ static int bpf_object__init_btf(struct bpf_object *obj, BTF_ELF_SEC, err); goto out; } + /* enforce 8-byte pointers for BPF-targeted BTFs */ + btf__set_pointer_size(obj->btf, 8); err = 0; } if (btf_ext_data) { @@ -2542,6 +2544,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) if (IS_ERR(kern_btf)) return PTR_ERR(kern_btf); + /* enforce 8-byte pointers for BPF-targeted BTFs */ + btf__set_pointer_size(obj->btf, 8); bpf_object__sanitize_btf(obj, kern_btf); } @@ -3478,10 +3482,11 @@ bpf_object__probe_global_data(struct bpf_object *obj) map = bpf_create_map_xattr(&map_attr); if (map < 0) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); + ret = -errno; + cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); 
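/*
 * A stand-alone sketch of the errno-capture fix applied in this hunk
 * and in bpf_program__pin_instance() below: the old code logged the
 * error and only then returned -errno, but the logging helper may
 * itself clobber errno, so the value is saved first and the saved
 * copy is both printed and returned. open_checked() is hypothetical.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

static int open_checked(const char *path)
{
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		int err = -errno;	/* save before any other call */

		fprintf(stderr, "open %s: %s\n", path, strerror(-err));
		return err;		/* not -errno: it may be stale here */
	}
	return fd;
}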
pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", - __func__, cp, errno); - return -errno; + __func__, cp, -ret); + return ret; } insns[0].imm = map; @@ -5194,7 +5199,8 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, static int bpf_object__collect_map_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data) { - int i, j, nrels, new_sz, ptr_sz = sizeof(void *); + const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *); + int i, j, nrels, new_sz; const struct btf_var_secinfo *vi = NULL; const struct btf_type *sec, *var, *def; const struct btf_member *member; @@ -5243,7 +5249,7 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, vi = btf_var_secinfos(sec) + map->btf_var_idx; if (vi->offset <= rel.r_offset && - rel.r_offset + sizeof(void *) <= vi->offset + vi->size) + rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size) break; } if (j == obj->nr_maps) { @@ -5279,17 +5285,20 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, return -EINVAL; moff = rel.r_offset - vi->offset - moff; - if (moff % ptr_sz) + /* here we use BPF pointer size, which is always 64 bit, as we + * are parsing ELF that was built for BPF target + */ + if (moff % bpf_ptr_sz) return -EINVAL; - moff /= ptr_sz; + moff /= bpf_ptr_sz; if (moff >= map->init_slots_sz) { new_sz = moff + 1; - tmp = realloc(map->init_slots, new_sz * ptr_sz); + tmp = realloc(map->init_slots, new_sz * host_ptr_sz); if (!tmp) return -ENOMEM; map->init_slots = tmp; memset(map->init_slots + map->init_slots_sz, 0, - (new_sz - map->init_slots_sz) * ptr_sz); + (new_sz - map->init_slots_sz) * host_ptr_sz); map->init_slots_sz = new_sz; } map->init_slots[moff] = targ_map; @@ -6012,9 +6021,10 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path, } if (bpf_obj_pin(prog->instances.fds[instance], path)) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); pr_warn("failed to pin program: %s\n", cp); - return -errno; + return err; } pr_debug("pinned program '%s'\n", path); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 0c4722bfdd0ac22a87d33162fbdc44df69e3d064..e35bd6cdbdbf85eca0fd7afb4a25c993e1846020 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -295,5 +295,7 @@ LIBBPF_0.1.0 { bpf_program__set_sk_lookup; btf__parse; btf__parse_raw; + btf__pointer_size; btf__set_fd; + btf__set_pointer_size; } LIBBPF_0.0.9; diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 3ba566de821c6b60f0c2c7ddea5468e3292e1ff1..5acc18b326061ffb3d66e01704555014dbb8cb03 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -5259,7 +5259,7 @@ static int print_arg_pointer(struct trace_seq *s, const char *format, int plen, default: ret = 0; val = eval_num_arg(data, size, event, arg); - trace_seq_printf(s, "%p", (void *)val); + trace_seq_printf(s, "%p", (void *)(intptr_t)val); break; } diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 3f72d8e261f3de3a4836afc899f700ad3ca0ad17..bd50cdff08a88bcd1327a9874612a2d54d528bcf 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -33,6 +33,10 @@ OPTIONS - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. + - a symbolic or raw PMU event followed by an optional colon + and a list of event modifiers, e.g., cpu-cycles:p. 
See the + linkperf:perf-list[1] man page for details on event modifiers. + - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where 'param1', 'param2', etc are defined as formats for the PMU in /sys/bus/event_source/devices/<pmu>/format/*. diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index c9bfefc051fbef6fa0288e842e5bccd3bdc1a13a..db420dd75e4357a106ea456a6ce101c10666758d 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -39,6 +39,10 @@ report:: - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. + - a symbolic or raw PMU event followed by an optional colon + and a list of event modifiers, e.g., cpu-cycles:p. See the + linkperf:perf-list[1] man page for details on event modifiers. + - a symbolically formed event like 'pmu/param1=0x3,param2/' where param1 and param2 are defined as formats for the PMU in /sys/bus/event_source/devices/<pmu>/format/* @@ -416,6 +420,9 @@ counts for all hardware threads in a core but show the sum counts per hardware thread. This is essentially a replacement for the any bit and convenient for post processing. +--summary:: +Print summary for interval mode (-I). + EXAMPLES -------- diff --git a/tools/perf/bench/synthesize.c b/tools/perf/bench/synthesize.c index 8d624aea1c5e546b808db97244c4e2d7b2827bc3..b2924e3181dc3844fa14002463ea4579f5c32e4d 100644 --- a/tools/perf/bench/synthesize.c +++ b/tools/perf/bench/synthesize.c @@ -162,8 +162,8 @@ static int do_run_multi_threaded(struct target *target, init_stats(&event_stats); for (i = 0; i < multi_iterations; i++) { session = perf_session__new(NULL, false, NULL); - if (!session) - return -ENOMEM; + if (IS_ERR(session)) + return PTR_ERR(session); atomic_set(&event_count, 0); gettimeofday(&start, NULL); diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index f91352f847c08fcdbe57ed51305b8aa3e9a311a1..772f1057647ff6274f8c0d1f8b8325dff1df4ad3 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -2452,7 +2452,7 @@ static struct option __record_options[] = { OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize, "synthesize non-sample events at the end of output"), OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"), - OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"), + OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"), OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq, "Fail if the specified frequency can't be used"), OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'", diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index ece1cddfcd7c79244ede449118fb01aba79de531..3c74c9c0f3c38552e2ec5fa44aa84ec558e13431 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -1332,6 +1332,9 @@ int cmd_report(int argc, const char **argv) if (report.mmaps_mode) report.tasks_mode = true; + if (dump_trace) + report.tool.ordered_events = false; + if (quiet) perf_quiet_option(); diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 0c7d599fa555ac904bd9d48e48752c1908c15f3c..e6fc297cee91dd868b26fd78af8e32c207c63d56 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -2584,7 +2584,8 @@ static int timehist_sched_change_event(struct perf_tool *tool, } if (!sched->idle_hist || thread->tid == 0) { - timehist_update_runtime_stats(tr, t, tprev); + if (!cpu_list || 
test_bit(sample->cpu, cpu_bitmap)) + timehist_update_runtime_stats(tr, t, tprev); if (sched->idle_hist) { struct idle_thread_runtime *itr = (void *)tr; @@ -2857,6 +2858,9 @@ static void timehist_print_summary(struct perf_sched *sched, printf("\nIdle stats:\n"); for (i = 0; i < idle_max_cpu; ++i) { + if (cpu_list && !test_bit(i, cpu_bitmap)) + continue; + t = idle_threads[i]; if (!t) continue; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 483a28ef4ec4feefacb087c761ba11373e0a055b..fddc97cac98415698242cf272420f142ae1fefc5 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -404,7 +404,7 @@ static void read_counters(struct timespec *rs) { struct evsel *counter; - if (!stat_config.summary && (read_affinity_counters(rs) < 0)) + if (!stat_config.stop_read_counter && (read_affinity_counters(rs) < 0)) return; evlist__for_each_entry(evsel_list, counter) { @@ -897,9 +897,9 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx) if (stat_config.walltime_run_table) stat_config.walltime_run[run_idx] = t1 - t0; - if (interval) { + if (interval && stat_config.summary) { stat_config.interval = 0; - stat_config.summary = true; + stat_config.stop_read_counter = true; init_stats(&walltime_nsecs_stats); update_stats(&walltime_nsecs_stats, t1 - t0); @@ -1164,6 +1164,8 @@ static struct option stat_options[] = { "Use with 'percore' event qualifier to show the event " "counts of one hardware thread by sum up total hardware " "threads of same physical core"), + OPT_BOOLEAN(0, "summary", &stat_config.summary, + "print summary for interval mode"), #ifdef HAVE_LIBPFM OPT_CALLBACK(0, "pfm-events", &evsel_list, "event", "libpfm4 event selector. use 'perf list' to list available events", diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 994c230027bb2acac076875fae053a78c96c48e8..7c64134472c7750744c216a1240eea1ea7dc6cb7 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1746,6 +1746,7 @@ int cmd_top(int argc, const char **argv) goto out_delete_evlist; } +#ifdef HAVE_LIBBPF_SUPPORT if (!top.record_opts.no_bpf_event) { top.sb_evlist = evlist__new(); @@ -1759,6 +1760,7 @@ int cmd_top(int argc, const char **argv) goto out_delete_evlist; } } +#endif if (perf_evlist__start_sb_thread(top.sb_evlist, target)) { pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n"); diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index fa86c5f997cc54158aa9ab9c93440c3af06685ad..fc9c158bfa134efa1c9832398c0a62ce0b905f5e 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -137,7 +137,7 @@ static char *fixregex(char *s) return s; /* allocate space for a new string */ - fixed = (char *) malloc(len + 1); + fixed = (char *) malloc(len + esc_count + 1); if (!fixed) return NULL; diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c index 5d20bf8397f09d2899f191986f24c601bb313b14..cd77e334e5777d1b6d1fcacf795b132182775973 100644 --- a/tools/perf/tests/bpf.c +++ b/tools/perf/tests/bpf.c @@ -197,7 +197,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void), perf_mmap__read_done(&md->core); } - if (count != expect) { + if (count != expect * evlist->core.nr_entries) { pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect, count); goto out_delete_evlist; } diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 
7f9f87a470c3bcef8c2c985d171d26ccda2f2710..aae0fd9045c1c47d4a23681fbf9bb610ce31236e 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -719,7 +719,7 @@ static int test__group2(struct evlist *evlist) TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); - TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); @@ -842,7 +842,7 @@ static int test__group3(struct evlist *evlist __maybe_unused) TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); - TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip); TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel)); diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c index fc0838a7abc22f3839dffb2e05910eed35b39258..23db8acc492df08afaeae99539fae377c90b8833 100644 --- a/tools/perf/tests/parse-metric.c +++ b/tools/perf/tests/parse-metric.c @@ -70,6 +70,9 @@ static struct pmu_event pme_test[] = { { .metric_expr = "1/m3", .metric_name = "M3", +}, +{ + .name = NULL, } }; diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index be9c4c0549bc83ae0dc2de335284b96e8908b994..a07626f072087d78738a81139a063597ccf0e02b 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -3629,8 +3629,8 @@ int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help, { int nr_entries = evlist->core.nr_entries; -single_entry: if (perf_evlist__single_entry(evlist)) { +single_entry: { struct evsel *first = evlist__first(evlist); return perf_evsel__hists_browse(first, nr_entries, help, @@ -3638,6 +3638,7 @@ int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help, env, warn_lost_event, annotation_opts); } + } if (symbol_conf.event_group) { struct evsel *pos; diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c index 302a14d0aca9e50d34abed3fffe65b1047554680..93e063f22be50d89495ee62b374aada54d67d4d8 100644 --- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c +++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c @@ -182,15 +182,15 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder) if (payload & BIT(EV_TLB_ACCESS)) decoder->record.type |= ARM_SPE_TLB_ACCESS; - if ((idx == 1 || idx == 2 || idx == 3) && + if ((idx == 2 || idx == 4 || idx == 8) && (payload & BIT(EV_LLC_MISS))) decoder->record.type |= ARM_SPE_LLC_MISS; - if ((idx == 1 || idx == 2 || idx == 3) && + if ((idx == 2 || idx == 4 || idx == 8) && (payload & BIT(EV_LLC_ACCESS))) decoder->record.type |= ARM_SPE_LLC_ACCESS; - if ((idx == 1 || idx == 2 || idx == 3) && + if ((idx == 2 || idx == 4 || idx == 8) && (payload & BIT(EV_REMOTE_ACCESS))) decoder->record.type |= 
ARM_SPE_REMOTE_ACCESS; diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index c283223fb31f22479ecfa9e46cdc888c902025c4..a2a369e2fbb67eadbf1474d87a1b55e335bb0f8b 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -1344,8 +1344,15 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm, attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR; } - if (etm->synth_opts.last_branch) + if (etm->synth_opts.last_branch) { attr.sample_type |= PERF_SAMPLE_BRANCH_STACK; + /* + * We don't use the hardware index, but the sample generation + * code uses the new format branch_stack with this field, + * so the event attributes must indicate that it's present. + */ + attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX; + } if (etm->synth_opts.instructions) { attr.config = PERF_COUNT_HW_INSTRUCTIONS; diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 2a8d245351e747188c690193a5b6f61ca2e99fed..0af4e81c46e2bea4f4698da74c5774be79b62708 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -3017,8 +3017,15 @@ static int intel_pt_synth_events(struct intel_pt *pt, if (pt->synth_opts.callchain) attr.sample_type |= PERF_SAMPLE_CALLCHAIN; - if (pt->synth_opts.last_branch) + if (pt->synth_opts.last_branch) { attr.sample_type |= PERF_SAMPLE_BRANCH_STACK; + /* + * We don't use the hardware index, but the sample generation + * code uses the new format branch_stack with this field, + * so the event attributes must indicate that it's present. + */ + attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX; + } if (pt->synth_opts.instructions) { attr.config = PERF_COUNT_HW_INSTRUCTIONS; diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 208b813e00ea49219773352ef6e40e7c47757673..85587de027a51880a647441ca866348867577687 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -736,12 +736,6 @@ int machine__process_switch_event(struct machine *machine __maybe_unused, return 0; } -static int is_bpf_image(const char *name) -{ - return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0 || - strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1) == 0; -} - static int machine__process_ksymbol_register(struct machine *machine, union perf_event *event, struct perf_sample *sample __maybe_unused) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 1d7210804639fdc0bbc9f17bf9ce838da97a4852..cc0faf8f132194f7a4f9185f3eae37cbece69f2e 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -267,6 +267,22 @@ bool __map__is_bpf_prog(const struct map *map) return name && (strstr(name, "bpf_prog_") == name); } +bool __map__is_bpf_image(const struct map *map) +{ + const char *name; + + if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE) + return true; + + /* + * If PERF_RECORD_KSYMBOL is not included, the dso will not have + * type of DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we can + * guess the type based on name. 
+ */ + name = map->dso->short_name; + return name && is_bpf_image(name); +} + bool __map__is_ool(const struct map *map) { return map->dso && map->dso->binary_type == DSO_BINARY_TYPE__OOL; diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 9e312ae2d656339dfe01e97a2e28ca70fe36d267..c2f5d28fe73ac4956fc70e41a500e52059c819bd 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -147,12 +147,14 @@ int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, bool __map__is_kernel(const struct map *map); bool __map__is_extra_kernel_map(const struct map *map); bool __map__is_bpf_prog(const struct map *map); +bool __map__is_bpf_image(const struct map *map); bool __map__is_ool(const struct map *map); static inline bool __map__is_kmodule(const struct map *map) { return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) && - !__map__is_bpf_prog(map) && !__map__is_ool(map); + !__map__is_bpf_prog(map) && !__map__is_ool(map) && + !__map__is_bpf_image(map); } bool map__has_symbols(const struct map *map); @@ -164,4 +166,9 @@ static inline bool is_entry_trampoline(const char *name) return !strcmp(name, ENTRY_TRAMPOLINE_NAME); } +static inline bool is_bpf_image(const char *name) +{ + return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0 || + strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1) == 0; +} #endif /* __PERF_MAP_H */ diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 9f7260e691134ef6a1f696d515c768e42d6c7e9d..c4d2394e2b2dc60f63ff12954587960bad087b39 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -37,6 +37,7 @@ #include "util/evsel_config.h" #include "util/event.h" #include "util/pfm.h" +#include "perf.h" #define MAX_NAME_LEN 100 @@ -1533,19 +1534,23 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, evsel = __add_event(list, &parse_state->idx, &attr, true, get_config_name(head_config), pmu, &config_terms, auto_merge_stats, NULL); - if (evsel) { - evsel->unit = info.unit; - evsel->scale = info.scale; - evsel->per_pkg = info.per_pkg; - evsel->snapshot = info.snapshot; - evsel->metric_expr = info.metric_expr; - evsel->metric_name = info.metric_name; - evsel->pmu_name = name ? strdup(name) : NULL; - evsel->use_uncore_alias = use_uncore_alias; - evsel->percore = config_term_percore(&evsel->config_terms); - } + if (!evsel) + return -ENOMEM; + + evsel->pmu_name = name ? strdup(name) : NULL; + evsel->use_uncore_alias = use_uncore_alias; + evsel->percore = config_term_percore(&evsel->config_terms); - return evsel ? 
0 : -ENOMEM; + if (parse_state->fake_pmu) + return 0; + + evsel->unit = info.unit; + evsel->scale = info.scale; + evsel->per_pkg = info.per_pkg; + evsel->snapshot = info.snapshot; + evsel->metric_expr = info.metric_expr; + evsel->metric_name = info.metric_name; + return 0; } int parse_events_multi_pmu_add(struct parse_events_state *parse_state, @@ -1794,6 +1799,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str, if (*str == 'u') { if (!exclude) exclude = eu = ek = eh = 1; + if (!exclude_GH && !perf_guest) + eG = 1; eu = 0; } else if (*str == 'k') { if (!exclude) diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index b9fb91fdc5de91771316688c51c827b41c92791b..645bf4f1859fd76b03310913eb0dcebc41a11350 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -511,7 +511,7 @@ PE_PREFIX_MEM PE_VALUE '/' PE_VALUE ':' PE_MODIFIER_BP sep_dc list = alloc_list(); ABORT_ON(!list); err = parse_events_add_breakpoint(list, &parse_state->idx, - (void *) $2, $6, $4); + (void *)(uintptr_t) $2, $6, $4); free($6); if (err) { free(list); @@ -528,7 +528,7 @@ PE_PREFIX_MEM PE_VALUE '/' PE_VALUE sep_dc list = alloc_list(); ABORT_ON(!list); if (parse_events_add_breakpoint(list, &parse_state->idx, - (void *) $2, NULL, $4)) { + (void *)(uintptr_t) $2, NULL, $4)) { free(list); YYABORT; } @@ -544,7 +544,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc list = alloc_list(); ABORT_ON(!list); err = parse_events_add_breakpoint(list, &parse_state->idx, - (void *) $2, $4, 0); + (void *)(uintptr_t) $2, $4, 0); free($4); if (err) { free(list); @@ -561,7 +561,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc list = alloc_list(); ABORT_ON(!list); if (parse_events_add_breakpoint(list, &parse_state->idx, - (void *) $2, NULL, 0)) { + (void *)(uintptr_t) $2, NULL, 0)) { free(list); YYABORT; } diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index ffbc9d35a383f717d254c680aabe26ea4e7dc0fd..7a5f03764702bfb56b470e5eb7a6958d2a0ec387 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -87,7 +87,7 @@ static int perf_session__process_compressed_event(struct perf_session *session, session->decomp_last = decomp; } - pr_debug("decomp (B): %ld to %ld\n", src_size, decomp_size); + pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size); return 0; } diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 57d0706e1330a239c4678105b865ab60f08cec0d..493ec372fdec4152df02ae02990b4b33effa1a1d 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -117,7 +117,7 @@ static void aggr_printout(struct perf_stat_config *config, cpu_map__id_to_die(id), config->csv_output ? 0 : -3, cpu_map__id_to_cpu(id), config->csv_sep); - } else { + } else if (id > -1) { fprintf(config->output, "CPU%*d%s", config->csv_output ? 
0 : -7, evsel__cpus(evsel)->map[id], diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index f8778cffd9419b9e43350717fa038485b40a91c3..aa3bed48511b328957cfb7c6bd6b6ac5c013a681 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -113,6 +113,7 @@ struct perf_stat_config { bool summary; bool metric_no_group; bool metric_no_merge; + bool stop_read_counter; FILE *output; unsigned int interval; unsigned int timeout; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 1f5fcb828a212ebad87a94fc0595e9602dc4afbe..5151a8c0b791af18d7cb0fcbca87ebe357a887b7 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -663,6 +663,7 @@ static bool symbol__is_idle(const char *name) "exit_idle", "mwait_idle", "mwait_idle_with_hints", + "mwait_idle_with_hints.constprop.0", "poll_idle", "ppc64_runlatch_off", "pseries_dedicated_idle_sleep", diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c index d2202392ffdbb328b9d08c4995c96faaa1a082fa..48dd2b018c47a7fcbe4a5e78e3d43bfc03d349b2 100644 --- a/tools/perf/util/zstd.c +++ b/tools/perf/util/zstd.c @@ -99,7 +99,7 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size while (input.pos < input.size) { ret = ZSTD_decompressStream(data->dstream, &output, &input); if (ZSTD_isError(ret)) { - pr_err("failed to decompress (B): %ld -> %ld, dst_size %ld : %s\n", + pr_err("failed to decompress (B): %zd -> %zd, dst_size %zd : %s\n", src_size, output.size, dst_size, ZSTD_getErrorName(ret)); break; } diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 1bb204cee853fca160fdffa110b167394b5222b4..9a0946ddb705acdbf148297444c0cefd13e95e40 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -6,7 +6,6 @@ test_lpm_map test_tag FEATURE-DUMP.libbpf fixdep -test_align test_dev_cgroup /test_progs* test_tcpbpf_user diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index a83b5827532f777291d682961911cd3f4b29db1a..fc946b7ac288df9064b551071fe2d7df22e06fe9 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -32,7 +32,7 @@ LDLIBS += -lcap -lelf -lz -lrt -lpthread # Order correspond to 'make run_tests' order TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ - test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ + test_verifier_log test_dev_cgroup test_tcpbpf_user \ test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \ test_cgroup_storage \ test_netcnt test_tcpnotify_user test_sock_fields test_sysctl \ diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c index 7afa4160416f6a649cc41d9d171567341acc8fe4..284d5921c3458b82688ec92bd48b673ecb4e50ee 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c @@ -159,15 +159,15 @@ void test_bpf_obj_id(void) /* Check getting link info */ info_len = sizeof(struct bpf_link_info) * 2; bzero(&link_infos[i], info_len); - link_infos[i].raw_tracepoint.tp_name = (__u64)&tp_name; + link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name); link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name); err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]), &link_infos[i], &info_len); if (CHECK(err || link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT || link_infos[i].prog_id != prog_infos[i].id || - 
link_infos[i].raw_tracepoint.tp_name != (__u64)&tp_name || - strcmp((char *)link_infos[i].raw_tracepoint.tp_name, + link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) || + strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), "sys_enter") || info_len != sizeof(struct bpf_link_info), "get-link-info(fd)", @@ -178,7 +178,7 @@ void test_bpf_obj_id(void) link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT, link_infos[i].id, link_infos[i].prog_id, prog_infos[i].id, - (char *)link_infos[i].raw_tracepoint.tp_name, + (const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), "sys_enter")) goto done; diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index cb33a7ee4e04f0f62e21b9ea5c815b9ba3f1a746..39fb81d9daeb5fdc0129f4ffe533a67e48c246ee 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -12,15 +12,16 @@ void btf_dump_printf(void *ctx, const char *fmt, va_list args) static struct btf_dump_test_case { const char *name; const char *file; + bool known_ptr_sz; struct btf_dump_opts opts; } btf_dump_test_cases[] = { - {"btf_dump: syntax", "btf_dump_test_case_syntax", {}}, - {"btf_dump: ordering", "btf_dump_test_case_ordering", {}}, - {"btf_dump: padding", "btf_dump_test_case_padding", {}}, - {"btf_dump: packing", "btf_dump_test_case_packing", {}}, - {"btf_dump: bitfields", "btf_dump_test_case_bitfields", {}}, - {"btf_dump: multidim", "btf_dump_test_case_multidim", {}}, - {"btf_dump: namespacing", "btf_dump_test_case_namespacing", {}}, + {"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}}, + {"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}}, + {"btf_dump: padding", "btf_dump_test_case_padding", true, {}}, + {"btf_dump: packing", "btf_dump_test_case_packing", true, {}}, + {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}}, + {"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}}, + {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}}, }; static int btf_dump_all_types(const struct btf *btf, @@ -62,6 +63,18 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t) goto done; } + /* tests without t->known_ptr_sz have no "long" or "unsigned long" type, + * so it's impossible to determine correct pointer size; but if they + * do, it should be 8 regardless of host architecture, because BPF + * target is always 64-bit + */ + if (!t->known_ptr_sz) { + btf__set_pointer_size(btf, 8); + } else { + CHECK(btf__pointer_size(btf) != 8, "ptr_sz", "exp %d, got %zu\n", + 8, btf__pointer_size(btf)); + } + snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file); fd = mkstemp(out_file); if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) { diff --git a/tools/testing/selftests/bpf/prog_tests/core_extern.c b/tools/testing/selftests/bpf/prog_tests/core_extern.c index b093787e944895c23b6302440a4580b1d38e0d68..1931a158510e01952c613050ac539039d6d4d2ba 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_extern.c +++ b/tools/testing/selftests/bpf/prog_tests/core_extern.c @@ -159,8 +159,8 @@ void test_core_extern(void) exp = (uint64_t *)&t->data; for (j = 0; j < n; j++) { CHECK(got[j] != exp[j], "check_res", - "result #%d: expected %lx, but got %lx\n", - j, exp[j], got[j]); + "result #%d: expected %llx, but got %llx\n", + j, (__u64)exp[j], (__u64)got[j]); } cleanup: test_core_extern__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c 
b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 084ed26a7d78cb44976d20d076c7beefd981e1d3..a54eafc5e4b31538ccfb16cdda150f5107f7b9e5 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -237,8 +237,8 @@ .union_sz = sizeof(((type *)0)->union_field), \ .arr_sz = sizeof(((type *)0)->arr_field), \ .arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \ - .ptr_sz = sizeof(((type *)0)->ptr_field), \ - .enum_sz = sizeof(((type *)0)->enum_field), \ + .ptr_sz = 8, /* always 8-byte pointer for BPF */ \ + .enum_sz = sizeof(((type *)0)->enum_field), \ } #define SIZE_CASE(name) { \ @@ -432,20 +432,20 @@ static struct core_reloc_test_case test_cases[] = { .sb4 = -1, .sb20 = -0x17654321, .u32 = 0xBEEF, - .s32 = -0x3FEDCBA987654321, + .s32 = -0x3FEDCBA987654321LL, }), BITFIELDS_CASE(bitfields___bitfield_vs_int, { - .ub1 = 0xFEDCBA9876543210, + .ub1 = 0xFEDCBA9876543210LL, .ub2 = 0xA6, - .ub7 = -0x7EDCBA987654321, - .sb4 = -0x6123456789ABCDE, - .sb20 = 0xD00D, + .ub7 = -0x7EDCBA987654321LL, + .sb4 = -0x6123456789ABCDELL, + .sb20 = 0xD00DLL, .u32 = -0x76543, - .s32 = 0x0ADEADBEEFBADB0B, + .s32 = 0x0ADEADBEEFBADB0BLL, }), BITFIELDS_CASE(bitfields___just_big_enough, { - .ub1 = 0xF, - .ub2 = 0x0812345678FEDCBA, + .ub1 = 0xFLL, + .ub2 = 0x0812345678FEDCBALL, }), BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield), diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c index a895bfed55db04ecedd03652149d11d2edd48943..197d0d217b56be50db470fb237930a2d22a3b61b 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -16,7 +16,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, __u32 duration = 0, retval; struct bpf_map *data_map; const int zero = 0; - u64 *result = NULL; + __u64 *result = NULL; err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC, &pkt_obj, &pkt_fd); @@ -29,7 +29,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, link = calloc(sizeof(struct bpf_link *), prog_cnt); prog = calloc(sizeof(struct bpf_program *), prog_cnt); - result = malloc((prog_cnt + 32 /* spare */) * sizeof(u64)); + result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64)); if (CHECK(!link || !prog || !result, "alloc_memory", "failed to alloc memory")) goto close_prog; @@ -72,7 +72,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, goto close_prog; for (i = 0; i < prog_cnt; i++) - if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n", + if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %llu\n", result[i])) goto close_prog; diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index f11f187990e95b87cb81d8457a976d8bb86f9fdd..cd6dc80edf18eb29f3193e317f43eb4fb82886f9 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -591,7 +591,7 @@ void test_flow_dissector(void) CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) || err || tattr.retval != 1, tests[i].name, - "err %d errno %d retval %d duration %d size %u/%lu\n", + "err %d errno %d retval %d duration %d size %u/%zu\n", err, errno, tattr.retval, tattr.duration, tattr.data_size_out, sizeof(flow_keys)); CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c 
b/tools/testing/selftests/bpf/prog_tests/global_data.c index e3cb62b0a110ec81b2be13b5134e283856b95965..9efa7e50eab27f7d11aa0dbddd9e86610522cc0c 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data.c @@ -5,7 +5,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration) { int i, err, map_fd; - uint64_t num; + __u64 num; map_fd = bpf_find_map(__func__, obj, "result_number"); if (CHECK_FAIL(map_fd < 0)) @@ -14,7 +14,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration) struct { char *name; uint32_t key; - uint64_t num; + __u64 num; } tests[] = { { "relocate .bss reference", 0, 0 }, { "relocate .data reference", 1, 42 }, @@ -32,7 +32,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration) for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num); CHECK(err || num != tests[i].num, tests[i].name, - "err %d result %lx expected %lx\n", + "err %d result %llx expected %llx\n", err, num, tests[i].num); } } diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c index 43d0b5578f461fa1664c2f9160cb401d7a279f42..9c3c5c0f068fb521897794aa050b841df90ff347 100644 --- a/tools/testing/selftests/bpf/prog_tests/mmap.c +++ b/tools/testing/selftests/bpf/prog_tests/mmap.c @@ -21,7 +21,7 @@ void test_mmap(void) const long page_size = sysconf(_SC_PAGE_SIZE); int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd; struct bpf_map *data_map, *bss_map; - void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2; + void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2; struct test_mmap__bss *bss_data; struct bpf_map_info map_info; __u32 map_info_sz = sizeof(map_info); @@ -183,16 +183,23 @@ void test_mmap(void) /* check some more advanced mmap() manipulations */ + tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, + -1, 0); + if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno)) + goto cleanup; + /* map all but last page: pages 1-3 mapped */ - tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED, + tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, data_map_fd, 0); - if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno)) + if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) { + munmap(tmp0, 4 * page_size); goto cleanup; + } /* unmap second page: pages 1, 3 mapped */ err = munmap(tmp1 + page_size, page_size); if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) { - munmap(tmp1, map_sz); + munmap(tmp1, 4 * page_size); goto cleanup; } @@ -201,7 +208,7 @@ void test_mmap(void) MAP_SHARED | MAP_FIXED, data_map_fd, 0); if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) { munmap(tmp1, page_size); - munmap(tmp1 + 2*page_size, page_size); + munmap(tmp1 + 2*page_size, 2 * page_size); goto cleanup; } CHECK(tmp1 + page_size != tmp2, "adv_mmap4", @@ -211,7 +218,7 @@ void test_mmap(void) tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, data_map_fd, 0); if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) { - munmap(tmp1, 3 * page_size); /* unmap page 1 */ + munmap(tmp1, 4 * page_size); /* unmap page 1 */ goto cleanup; } CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2); diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c index 
dde2b7ae7bc9e9bee8b342f260e88fc524aab5bf..935a294f049a23fc060b8bbb4358114a07229647 100644 --- a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c +++ b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c @@ -28,7 +28,7 @@ void test_prog_run_xattr(void) "err %d errno %d retval %d\n", err, errno, tattr.retval); CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out", - "incorrect output size, want %lu have %u\n", + "incorrect output size, want %zu have %u\n", sizeof(pkt_v4), tattr.data_size_out); CHECK_ATTR(buf[5] != 0, "overflow", diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index c571584c00f5bc196e66d814473d50b622faa2a4..9ff0412e1fd384fab4cd2c16de31a894cedb0e89 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -309,6 +309,7 @@ static void v4_to_v6(struct sockaddr_storage *ss) v6->sin6_addr.s6_addr[10] = 0xff; v6->sin6_addr.s6_addr[11] = 0xff; memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4); + memset(&v6->sin6_addr.s6_addr[0], 0, 10); } static int udp_recv_send(int server_fd) diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c index 25de86af2d033adc17a456a51062364da4594c5a..fafeddaad6a998e33ea99d95be5440fd6ed32558 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -81,7 +81,7 @@ void test_skb_ctx(void) CHECK_ATTR(tattr.ctx_size_out != sizeof(skb), "ctx_size_out", - "incorrect output size, want %lu have %u\n", + "incorrect output size, want %zu have %u\n", sizeof(skb), tattr.ctx_size_out); for (i = 0; i < 5; i++) diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c index 25b068591e9a40a42a5e62a4613f05b2081e2e77..193002b14d7f66ac9c1b6942f79d662760278ac7 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c +++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c @@ -19,7 +19,7 @@ static int libbpf_debug_print(enum libbpf_print_level level, log_buf = va_arg(args, char *); if (!log_buf) goto out; - if (strstr(log_buf, err_str) == 0) + if (err_str && strstr(log_buf, err_str) == 0) found = true; out: printf(format, log_buf); diff --git a/tools/testing/selftests/bpf/prog_tests/varlen.c b/tools/testing/selftests/bpf/prog_tests/varlen.c index c75525eab02c52e7e9b2e27c3bccd2e6583ec312..dd324b4933db48639388196ceadb22379d7f1f04 100644 --- a/tools/testing/selftests/bpf/prog_tests/varlen.c +++ b/tools/testing/selftests/bpf/prog_tests/varlen.c @@ -44,25 +44,25 @@ void test_varlen(void) CHECK_VAL(bss->payload1_len2, size2); CHECK_VAL(bss->total1, size1 + size2); CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check", - "doesn't match!"); + "doesn't match!\n"); CHECK_VAL(data->payload2_len1, size1); CHECK_VAL(data->payload2_len2, size2); CHECK_VAL(data->total2, size1 + size2); CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check", - "doesn't match!"); + "doesn't match!\n"); CHECK_VAL(data->payload3_len1, size1); CHECK_VAL(data->payload3_len2, size2); CHECK_VAL(data->total3, size1 + size2); CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check", - "doesn't match!"); + "doesn't match!\n"); CHECK_VAL(data->payload4_len1, size1); CHECK_VAL(data->payload4_len2, size2); CHECK_VAL(data->total4, size1 + size2); CHECK(memcmp(data->payload4, exp_str, size1 + size2), 
"content_check", - "doesn't match!"); + "doesn't match!\n"); cleanup: test_varlen__destroy(skel); } diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h index 34d84717c9464351e8380596910d04a6b4cd589c..69139ed6621645262fe24313bf85466e467b1567 100644 --- a/tools/testing/selftests/bpf/progs/core_reloc_types.h +++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h @@ -1,5 +1,10 @@ #include #include + +void preserce_ptr_sz_fn(long x) {} + +#define __bpf_aligned __attribute__((aligned(8))) + /* * KERNEL */ @@ -444,51 +449,51 @@ struct core_reloc_primitives { char a; int b; enum core_reloc_primitives_enum c; - void *d; - int (*f)(const char *); + void *d __bpf_aligned; + int (*f)(const char *) __bpf_aligned; }; struct core_reloc_primitives___diff_enum_def { char a; int b; - void *d; - int (*f)(const char *); + void *d __bpf_aligned; + int (*f)(const char *) __bpf_aligned; enum { X = 100, Y = 200, - } c; /* inline enum def with differing set of values */ + } c __bpf_aligned; /* inline enum def with differing set of values */ }; struct core_reloc_primitives___diff_func_proto { - void (*f)(int); /* incompatible function prototype */ - void *d; - enum core_reloc_primitives_enum c; + void (*f)(int) __bpf_aligned; /* incompatible function prototype */ + void *d __bpf_aligned; + enum core_reloc_primitives_enum c __bpf_aligned; int b; char a; }; struct core_reloc_primitives___diff_ptr_type { - const char * const d; /* different pointee type + modifiers */ - char a; + const char * const d __bpf_aligned; /* different pointee type + modifiers */ + char a __bpf_aligned; int b; enum core_reloc_primitives_enum c; - int (*f)(const char *); + int (*f)(const char *) __bpf_aligned; }; struct core_reloc_primitives___err_non_enum { char a[1]; int b; int c; /* int instead of enum */ - void *d; - int (*f)(const char *); + void *d __bpf_aligned; + int (*f)(const char *) __bpf_aligned; }; struct core_reloc_primitives___err_non_int { char a[1]; - int *b; /* ptr instead of int */ - enum core_reloc_primitives_enum c; - void *d; - int (*f)(const char *); + int *b __bpf_aligned; /* ptr instead of int */ + enum core_reloc_primitives_enum c __bpf_aligned; + void *d __bpf_aligned; + int (*f)(const char *) __bpf_aligned; }; struct core_reloc_primitives___err_non_ptr { @@ -496,7 +501,7 @@ struct core_reloc_primitives___err_non_ptr { int b; enum core_reloc_primitives_enum c; int d; /* int instead of ptr */ - int (*f)(const char *); + int (*f)(const char *) __bpf_aligned; }; /* @@ -507,7 +512,7 @@ struct core_reloc_mods_output { }; typedef const int int_t; -typedef const char *char_ptr_t; +typedef const char *char_ptr_t __bpf_aligned; typedef const int arr_t[7]; struct core_reloc_mods_substruct { @@ -523,9 +528,9 @@ typedef struct { struct core_reloc_mods { int a; int_t b; - char *c; + char *c __bpf_aligned; char_ptr_t d; - int e[3]; + int e[3] __bpf_aligned; arr_t f; struct core_reloc_mods_substruct g; core_reloc_mods_substruct_t h; @@ -535,9 +540,9 @@ struct core_reloc_mods { struct core_reloc_mods___mod_swap { int b; int_t a; - char *d; + char *d __bpf_aligned; char_ptr_t c; - int f[3]; + int f[3] __bpf_aligned; arr_t e; struct { int y; @@ -555,7 +560,7 @@ typedef arr1_t arr2_t; typedef arr2_t arr3_t; typedef arr3_t arr4_t; -typedef const char * const volatile fancy_char_ptr_t; +typedef const char * const volatile fancy_char_ptr_t __bpf_aligned; typedef core_reloc_mods_substruct_t core_reloc_mods_substruct_tt; @@ -567,7 +572,7 @@ struct 
core_reloc_mods___typedefs { arr4_t e; fancy_char_ptr_t d; fancy_char_ptr_t c; - int3_t b; + int3_t b __bpf_aligned; int3_t a; }; @@ -739,19 +744,19 @@ struct core_reloc_bitfields___bit_sz_change { int8_t sb4: 1; /* 4 -> 1 */ int32_t sb20: 30; /* 20 -> 30 */ /* non-bitfields */ - uint16_t u32; /* 32 -> 16 */ - int64_t s32; /* 32 -> 64 */ + uint16_t u32; /* 32 -> 16 */ + int64_t s32 __bpf_aligned; /* 32 -> 64 */ }; /* turn bitfield into non-bitfield and vice versa */ struct core_reloc_bitfields___bitfield_vs_int { uint64_t ub1; /* 3 -> 64 non-bitfield */ uint8_t ub2; /* 20 -> 8 non-bitfield */ - int64_t ub7; /* 7 -> 64 non-bitfield signed */ - int64_t sb4; /* 4 -> 64 non-bitfield signed */ - uint64_t sb20; /* 20 -> 16 non-bitfield unsigned */ - int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */ - uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */ + int64_t ub7 __bpf_aligned; /* 7 -> 64 non-bitfield signed */ + int64_t sb4 __bpf_aligned; /* 4 -> 64 non-bitfield signed */ + uint64_t sb20 __bpf_aligned; /* 20 -> 16 non-bitfield unsigned */ + int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */ + uint64_t s32: 60 __bpf_aligned; /* 32 non-bitfield -> 60 bitfield */ }; struct core_reloc_bitfields___just_big_enough { diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index 1f1966e86e9f9e9a815a00e27b79eb83509cf4af..3e6912e4df3df6c99e773564469ef389e6f8708d 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -54,6 +54,7 @@ SEC("sockops") int bpf_testcb(struct bpf_sock_ops *skops) { char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)]; + struct bpf_sock_ops *reuse = skops; struct tcphdr *thdr; int good_call_rv = 0; int bad_call_rv = 0; @@ -62,6 +63,46 @@ int bpf_testcb(struct bpf_sock_ops *skops) int v = 0; int op; + /* Test reading fields in bpf_sock_ops using single register */ + asm volatile ( + "%[reuse] = *(u32 *)(%[reuse] +96)" + : [reuse] "+r"(reuse) + :); + + asm volatile ( + "%[op] = *(u32 *)(%[skops] +96)" + : [op] "+r"(op) + : [skops] "r"(skops) + :); + + asm volatile ( + "r9 = %[skops];\n" + "r8 = *(u32 *)(r9 +164);\n" + "*(u32 *)(r9 +164) = r8;\n" + :: [skops] "r"(skops) + : "r9", "r8"); + + asm volatile ( + "r1 = %[skops];\n" + "r1 = *(u64 *)(r1 +184);\n" + "if r1 == 0 goto +1;\n" + "r1 = *(u32 *)(r1 +4);\n" + :: [skops] "r"(skops):"r1"); + + asm volatile ( + "r9 = %[skops];\n" + "r9 = *(u64 *)(r9 +184);\n" + "if r9 == 0 goto +1;\n" + "r9 = *(u32 *)(r9 +4);\n" + :: [skops] "r"(skops):"r9"); + + asm volatile ( + "r1 = %[skops];\n" + "r2 = *(u64 *)(r1 +184);\n" + "if r2 == 0 goto +1;\n" + "r2 = *(u32 *)(r2 +4);\n" + :: [skops] "r"(skops):"r1", "r2"); + op = (int) skops->op; update_event_map(op); diff --git a/tools/testing/selftests/bpf/progs/test_varlen.c b/tools/testing/selftests/bpf/progs/test_varlen.c index cd4b72c55dfeaf8b9d4719bf497ceee8ad0a4f28..913acdffd90ff698d88f49dc2273993176f491b6 100644 --- a/tools/testing/selftests/bpf/progs/test_varlen.c +++ b/tools/testing/selftests/bpf/progs/test_varlen.c @@ -15,9 +15,9 @@ int test_pid = 0; bool capture = false; /* .bss */ -long payload1_len1 = 0; -long payload1_len2 = 0; -long total1 = 0; +__u64 payload1_len1 = 0; +__u64 payload1_len2 = 0; +__u64 total1 = 0; char payload1[MAX_LEN + MAX_LEN] = {}; /* .data */ diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 
305fae8f80a98acc6709c60f5fd9407098775f09..c75fc6447186ab2e77b58afb6d83cf94a0db4854 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -3883,7 +3883,7 @@ static int test_big_btf_info(unsigned int test_num) info_garbage.garbage = 0; err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len); if (CHECK(err || info_len != sizeof(*info), - "err:%d errno:%d info_len:%u sizeof(*info):%lu", + "err:%d errno:%d info_len:%u sizeof(*info):%zu", err, errno, info_len, sizeof(*info))) { err = -1; goto done; @@ -4094,7 +4094,7 @@ static int do_test_get_info(unsigned int test_num) if (CHECK(err || !info.id || info_len != sizeof(info) || info.btf_size != raw_btf_size || (ret = memcmp(raw_btf, user_btf, expected_nbytes)), - "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%lu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d", + "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%zu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d", err, errno, info.id, info_len, sizeof(info), raw_btf_size, info.btf_size, expected_nbytes, ret)) { err = -1; @@ -4730,7 +4730,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind, nexpected_line = snprintf(expected_line, line_size, "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," - "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s," + "{%llu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s," "%u,0x%x,[[%d,%d],[%d,%d]]}\n", percpu_map ? "\tcpu" : "", percpu_map ? cpu : next_key, @@ -4738,7 +4738,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind, v->unused_bits2a, v->bits28, v->unused_bits2b, - v->ui64, + (__u64)v->ui64, v->ui8a[0], v->ui8a[1], v->ui8a[2], v->ui8a[3], v->ui8a[4], v->ui8a[5], diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 754cf611723ee53f759bcf7a88f989033431ae1a..0d92ebcb335d1b7637641c07594c99cf3312b9fb 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -1274,6 +1274,8 @@ static void __run_parallel(unsigned int tasks, pid_t pid[tasks]; int i; + fflush(stdout); + for (i = 0; i < tasks; i++) { pid[i] = fork(); if (pid[i] == 0) { diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index b1e4dadacd9b40b360fac66c064f14272713d87d..22943b58d752ae12baf9b4db75ceff36ee4b5205 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -618,7 +618,9 @@ int cd_flavor_subdir(const char *exec_name) if (!flavor) return 0; flavor++; - fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor); + if (env.verbosity > VERBOSE_NONE) + fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor); + return chdir(flavor); } diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 6e09bf738473e4dc3f34b56eea78bb047de325dc..dbb820dde1383157fd375b09c907bb54421143b4 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -135,6 +135,11 @@ static inline __u64 ptr_to_u64(const void *ptr) return (__u64) (unsigned long) ptr; } +static inline void *u64_to_ptr(__u64 ptr) +{ + return (void *) (unsigned long) ptr; +} + int bpf_find_map(const char *test, struct bpf_object *obj, const char *name); int compare_map_keys(int map1_fd, int map2_fd); int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len); diff --git a/tools/testing/selftests/kvm/x86_64/debug_regs.c b/tools/testing/selftests/kvm/x86_64/debug_regs.c index 
8162c58a1234e87b1f761713f122c207adc6b91a..b8d14f9db5f9ef32037687e36e981a8226bb12a0 100644 --- a/tools/testing/selftests/kvm/x86_64/debug_regs.c +++ b/tools/testing/selftests/kvm/x86_64/debug_regs.c @@ -40,11 +40,11 @@ static void guest_code(void) /* Single step test, covers 2 basic instructions and 2 emulated */ asm volatile("ss_start: " - "xor %%rax,%%rax\n\t" + "xor %%eax,%%eax\n\t" "cpuid\n\t" "movl $0x1a0,%%ecx\n\t" "rdmsr\n\t" - : : : "rax", "ecx"); + : : : "eax", "ebx", "ecx", "edx"); /* DR6.BD test */ asm volatile("bd_start: mov %%dr0, %%rax" : : : "rax"); diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh index 18c5de53558afc859d47793e8466e62ebd88f078..bf361f30d6ef912e0b857914c31a419a960fa9c2 100755 --- a/tools/testing/selftests/net/icmp_redirect.sh +++ b/tools/testing/selftests/net/icmp_redirect.sh @@ -180,6 +180,8 @@ setup() ;; r[12]) ip netns exec $ns sysctl -q -w net.ipv4.ip_forward=1 ip netns exec $ns sysctl -q -w net.ipv4.conf.all.send_redirects=1 + ip netns exec $ns sysctl -q -w net.ipv4.conf.default.rp_filter=0 + ip netns exec $ns sysctl -q -w net.ipv4.conf.all.rp_filter=0 ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=1 ip netns exec $ns sysctl -q -w net.ipv6.route.mtu_expires=10 diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh index d3e0809ab3681fa40cff983c7e48be9d6643eca6..431296c0f91cf6093c6e7c2fc43c6fba67d354f8 100755 --- a/tools/testing/selftests/netfilter/nft_flowtable.sh +++ b/tools/testing/selftests/netfilter/nft_flowtable.sh @@ -2,13 +2,18 @@ # SPDX-License-Identifier: GPL-2.0 # # This tests basic flowtable functionality. -# Creates following topology: +# Creates following default topology: # # Originator (MTU 9000) <-Router1-> MTU 1500 <-Router2-> Responder (MTU 2000) # Router1 is the one doing flow offloading, Router2 has no special # purpose other than having a link that is smaller than either Originator # and responder, i.e. TCPMSS announced values are too large and will still # result in fragmentation and/or PMTU discovery. +# +# You can check with different Originator/Link/Responder MTU, e.g.: +# nft_flowtable.sh -o8000 -l1500 -r2000 +# + # Kselftest framework requirement - SKIP code is 4. ksft_skip=4 @@ -21,29 +26,17 @@ ns2out="" log_netns=$(sysctl -n net.netfilter.nf_log_all_netns) -nft --version > /dev/null 2>&1 -if [ $? -ne 0 ];then - echo "SKIP: Could not run test without nft tool" - exit $ksft_skip -fi - -ip -Version > /dev/null 2>&1 -if [ $? -ne 0 ];then - echo "SKIP: Could not run test without ip tool" - exit $ksft_skip -fi - -which nc > /dev/null 2>&1 -if [ $? -ne 0 ];then - echo "SKIP: Could not run test without nc (netcat)" - exit $ksft_skip -fi +checktool (){ + if ! $1 > /dev/null 2>&1; then + echo "SKIP: Could not $2" + exit $ksft_skip + fi +} -ip netns add nsr1 -if [ $? -ne 0 ];then - echo "SKIP: Could not create net namespace" - exit $ksft_skip -fi +checktool "nft --version" "run test without nft tool" +checktool "ip -Version" "run test without ip tool" +checktool "which nc" "run test without nc (netcat)" +checktool "ip netns add nsr1" "create net namespace" ip netns add ns1 ip netns add ns2 @@ -89,11 +82,41 @@ ip -net nsr2 addr add dead:2::1/64 dev veth1 # ns2 is going via nsr2 with a smaller mtu, so that TCPMSS announced by both peers # is NOT the lowest link mtu. 
-ip -net nsr1 link set veth0 mtu 9000 -ip -net ns1 link set eth0 mtu 9000 +omtu=9000 +lmtu=1500 +rmtu=2000 + +usage(){ + echo "nft_flowtable.sh [OPTIONS]" + echo + echo "MTU options" + echo " -o originator" + echo " -l link" + echo " -r responder" + exit 1 +} + +while getopts "o:l:r:" o +do + case $o in + o) omtu=$OPTARG;; + l) lmtu=$OPTARG;; + r) rmtu=$OPTARG;; + *) usage;; + esac +done + +if ! ip -net nsr1 link set veth0 mtu $omtu; then + exit 1 +fi + +ip -net ns1 link set eth0 mtu $omtu + +if ! ip -net nsr2 link set veth1 mtu $rmtu; then + exit 1 +fi -ip -net nsr2 link set veth1 mtu 2000 -ip -net ns2 link set eth0 mtu 2000 +ip -net ns2 link set eth0 mtu $rmtu # transfer-net between nsr1 and nsr2. # these addresses are not used for connections. @@ -113,7 +136,10 @@ for i in 1 2; do ip -net ns$i route add default via 10.0.$i.1 ip -net ns$i addr add dead:$i::99/64 dev eth0 ip -net ns$i route add default via dead:$i::1 - ip netns exec ns$i sysctl net.ipv4.tcp_no_metrics_save=1 > /dev/null + if ! ip netns exec ns$i sysctl net.ipv4.tcp_no_metrics_save=1 > /dev/null; then + echo "ERROR: Check Originator/Responder values (problem during address addition)" + exit 1 + fi # don't set ip DF bit for first two tests ip netns exec ns$i sysctl net.ipv4.ip_no_pmtu_disc=1 > /dev/null @@ -147,7 +173,7 @@ table inet filter { # as PMTUd is off. # This rule is deleted for the last test, when we expect PMTUd # to kick in and ensure all packets meet mtu requirements. - meta length gt 1500 accept comment something-to-grep-for + meta length gt $lmtu accept comment something-to-grep-for # next line blocks connection w.o. working offload. # we only do this for reverse dir, because we expect packets to @@ -171,15 +197,13 @@ if [ $? -ne 0 ]; then fi # test basic connectivity -ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null -if [ $? -ne 0 ];then +if ! ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then echo "ERROR: ns1 cannot reach ns2" 1>&2 bash exit 1 fi -ip netns exec ns2 ping -c 1 -q 10.0.1.99 > /dev/null -if [ $? -ne 0 ];then +if ! ip netns exec ns2 ping -c 1 -q 10.0.1.99 > /dev/null; then echo "ERROR: ns2 cannot reach ns1" 1>&2 exit 1 fi @@ -196,7 +220,6 @@ ns2out=$(mktemp) make_file() { name=$1 - who=$2 SIZE=$((RANDOM % (1024 * 8))) TSIZE=$((SIZE * 1024)) @@ -215,8 +238,7 @@ check_transfer() out=$2 what=$3 - cmp "$in" "$out" > /dev/null 2>&1 - if [ $? -ne 0 ] ;then + if ! cmp "$in" "$out" > /dev/null 2>&1; then echo "FAIL: file mismatch for $what" 1>&2 ls -l "$in" ls -l "$out" @@ -243,17 +265,21 @@ test_tcp_forwarding_ip() sleep 3 - kill $lpid - kill $cpid + if ps -p $lpid > /dev/null;then + kill $lpid + fi + + if ps -p $cpid > /dev/null;then + kill $cpid + fi + wait - check_transfer "$ns1in" "$ns2out" "ns1 -> ns2" - if [ $? -ne 0 ];then + if ! check_transfer "$ns1in" "$ns2out" "ns1 -> ns2"; then lret=1 fi - check_transfer "$ns2in" "$ns1out" "ns1 <- ns2" - if [ $? -ne 0 ];then + if ! check_transfer "$ns2in" "$ns1out" "ns1 <- ns2"; then lret=1 fi @@ -282,13 +308,12 @@ test_tcp_forwarding_nat() return $lret } -make_file "$ns1in" "ns1" -make_file "$ns2in" "ns2" +make_file "$ns1in" +make_file "$ns2in" # First test: # No PMTU discovery, nsr1 is expected to fragment packets from ns1 to ns2 as needed. -test_tcp_forwarding ns1 ns2 -if [ $? -eq 0 ] ;then +if test_tcp_forwarding ns1 ns2; then echo "PASS: flow offloaded for ns1/ns2" else echo "FAIL: flow offload for ns1/ns2:" 1>&2 @@ -319,9 +344,7 @@ table ip nat { } EOF -test_tcp_forwarding_nat ns1 ns2 - -if [ $? 
-eq 0 ] ;then +if test_tcp_forwarding_nat ns1 ns2; then echo "PASS: flow offloaded for ns1/ns2 with NAT" else echo "FAIL: flow offload for ns1/ns2 with NAT" 1>&2 @@ -333,8 +356,7 @@ fi # Same as second test, but with PMTU discovery enabled. handle=$(ip netns exec nsr1 nft -a list table inet filter | grep something-to-grep-for | cut -d \# -f 2) -ip netns exec nsr1 nft delete rule inet filter forward $handle -if [ $? -ne 0 ] ;then +if ! ip netns exec nsr1 nft delete rule inet filter forward $handle; then echo "FAIL: Could not delete large-packet accept rule" exit 1 fi @@ -342,8 +364,7 @@ fi ip netns exec ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null ip netns exec ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null -test_tcp_forwarding_nat ns1 ns2 -if [ $? -eq 0 ] ;then +if test_tcp_forwarding_nat ns1 ns2; then echo "PASS: flow offloaded for ns1/ns2 with NAT and pmtu discovery" else echo "FAIL: flow offload for ns1/ns2 with NAT and pmtu discovery" 1>&2 @@ -389,8 +410,7 @@ ip -net ns2 route del 192.168.10.1 via 10.0.2.1 ip -net ns2 route add default via 10.0.2.1 ip -net ns2 route add default via dead:2::1 -test_tcp_forwarding ns1 ns2 -if [ $? -eq 0 ] ;then +if test_tcp_forwarding ns1 ns2; then echo "PASS: ipsec tunnel mode for ns1/ns2" else echo "FAIL: ipsec tunnel mode for ns1/ns2" diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore index 91c775c23c66089173d77013f99986e199f30b08..aac4a59f9e2813c460953413e74fa3bd5aaa7e39 100644 --- a/tools/testing/selftests/powerpc/mm/.gitignore +++ b/tools/testing/selftests/powerpc/mm/.gitignore @@ -2,6 +2,7 @@ hugetlb_vs_thp_test subpage_prot tempfile +prot_sao segv_errors wild_bctr large_vm_fork_separation diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index 250ce172e0daccc471209ae0576dd2185a63d710..defe488d6bf111b5469e7c9059e0e92c91603360 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -2,7 +2,7 @@ noarg: $(MAKE) -C ../ -TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot segv_errors wild_bctr \ +TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \ large_vm_fork_separation bad_accesses pkey_exec_prot \ pkey_siginfo stack_expansion_signal stack_expansion_ldst @@ -14,6 +14,8 @@ include ../../lib.mk $(TEST_GEN_PROGS): ../harness.c ../utils.c +$(OUTPUT)/prot_sao: ../utils.c + $(OUTPUT)/wild_bctr: CFLAGS += -m64 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64 $(OUTPUT)/bad_accesses: CFLAGS += -m64 diff --git a/tools/testing/selftests/powerpc/mm/prot_sao.c b/tools/testing/selftests/powerpc/mm/prot_sao.c new file mode 100644 index 0000000000000000000000000000000000000000..e0cf8ebbf8cd5ca39b3dcb10191d34ff47c308df --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/prot_sao.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2016, Michael Ellerman, IBM Corp. + */ + +#include <stdio.h> +#include <string.h> +#include <sys/mman.h> +#include <unistd.h> + +#include <asm/cputable.h> + +#include "utils.h" + +#define SIZE (64 * 1024) + +int test_prot_sao(void) +{ + char *p; + + /* SAO was introduced in 2.06 and removed in 3.1 */ + SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06) || + have_hwcap2(PPC_FEATURE2_ARCH_3_1)); + + /* + * Ensure we can ask for PROT_SAO. + * We can't really verify that it does the right thing, but at least we + * confirm the kernel will accept it. 
+	 */
+	p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO,
+		 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	FAIL_IF(p == MAP_FAILED);
+
+	/* Write to the mapping, to at least cause a fault */
+	memset(p, 0xaa, SIZE);
+
+	return 0;
+}
+
+int main(void)
+{
+	return test_harness(test_prot_sao, "prot-sao");
+}
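For context on what the new selftest exercises: PROT_SAO requests Strong Access Ordering for a mapping, a feature added in ISA 2.06 and dropped again in ISA 3.1, so on a POWER10-class system the same mmap() is expected to be refused. A minimal stand-alone sketch of probing for that at runtime follows; it is illustrative only, not part of the patch, and the PROT_SAO fallback value 0x10 is an assumption taken from the powerpc uapi headers.

/* Hypothetical user-space probe, not part of the patch above. */
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

#ifndef PROT_SAO
#define PROT_SAO 0x10	/* powerpc-specific; assumed from uapi/asm/mman.h */
#endif

int main(void)
{
	void *p = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE | PROT_SAO,
		       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED) {
		/* Expected where SAO is unsupported, e.g. ISA 3.1 CPUs */
		printf("PROT_SAO rejected (errno %d)\n", errno);
		return 0;
	}

	printf("PROT_SAO accepted\n");
	munmap(p, 64 * 1024);
	return 0;
}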
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index 7656c7ce79d905f1b35c112996c8c292618a4c9a..0e73a16874c4c5b121e8bdef48aa889ad5a21e85 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -13,6 +13,7 @@ DESTRUCTIVE_TESTS = alarmtimer-suspend valid-adjtimex adjtick change_skew \
 
 TEST_GEN_PROGS_EXTENDED = $(DESTRUCTIVE_TESTS)
 
+TEST_FILES := settings
 
 include ../lib.mk
 
diff --git a/tools/testing/selftests/timers/settings b/tools/testing/selftests/timers/settings
new file mode 100644
index 0000000000000000000000000000000000000000..e7b9417537fbc4626153b72e8f295ab4594c844b
--- /dev/null
+++ b/tools/testing/selftests/timers/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index c41f24b517f401c34fab8ae05a9d4ddf481eaa0b..65c141ebfbbde8ded6f4d9af1ebe879a616b6e95 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -462,6 +462,17 @@ static int test_vsys_x(void)
 	return 0;
 }
 
+/*
+ * Debuggers expect ptrace() to be able to peek at the vsyscall page.
+ * Use process_vm_readv() as a proxy for ptrace() to test this.  We
+ * want it to work in the vsyscall=emulate case and to fail in the
+ * vsyscall=xonly case.
+ *
+ * It's worth noting that this ABI is a bit nutty.  write(2) can't
+ * read from the vsyscall page on any kernel version or mode.  The
+ * fact that ptrace() ever worked was a nice courtesy of old kernels,
+ * but the code to support it is fairly gross.
+ */
 static int test_process_vm_readv(void)
 {
 #ifdef __x86_64__
@@ -477,8 +488,12 @@ static int test_process_vm_readv(void)
 	remote.iov_len = 4096;
 	ret = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
 	if (ret != 4096) {
-		printf("[OK]\tprocess_vm_readv() failed (ret = %d, errno = %d)\n", ret, errno);
-		return 0;
+		/*
+		 * We expect process_vm_readv() to work if and only if the
+		 * vsyscall page is readable.
+		 */
+		printf("[%s]\tprocess_vm_readv() failed (ret = %d, errno = %d)\n", vsyscall_map_r ? "FAIL" : "OK", ret, errno);
+		return vsyscall_map_r ? 1 : 0;
 	}
 
 	if (vsyscall_map_r) {
@@ -488,6 +503,9 @@ static int test_process_vm_readv(void)
 			printf("[FAIL]\tIt worked but returned incorrect data\n");
 			return 1;
 		}
+	} else {
+		printf("[FAIL]\tprocess_vm_readv() succeeded, but it should have failed in this configuration\n");
+		return 1;
 	}
 
 #endif
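To make the behaviour being tested concrete: a debugger-style reader probes the fixed vsyscall address in another (here, its own) process. The sketch below is illustrative only, not part of the patch; it should succeed under vsyscall=emulate and fail under vsyscall=xonly, mirroring the [OK]/[FAIL] logic above.

/* Stand-alone illustration: peek at the vsyscall page the way a
 * debugger would, using process_vm_readv() as a ptrace() stand-in. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct iovec local = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct iovec remote = {
		.iov_base = (void *)0xffffffffff600000UL, /* vsyscall base */
		.iov_len = sizeof(buf),
	};
	ssize_t ret = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);

	if (ret == (ssize_t)sizeof(buf))
		printf("vsyscall page is readable\n");
	else
		perror("process_vm_readv");	/* expected under vsyscall=xonly */
	return 0;
}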
diff --git a/tools/usb/Build b/tools/usb/Build
new file mode 100644
index 0000000000000000000000000000000000000000..2ad6f97458168377c5b903f7e1c2217419d0ab6a
--- /dev/null
+++ b/tools/usb/Build
@@ -0,0 +1,2 @@
+testusb-y += testusb.o
+ffs-test-y += ffs-test.o
diff --git a/tools/usb/Makefile b/tools/usb/Makefile
index 01d758d73b6db1b442d85387871442cdf9c39017..1b128e551b2e428d0cb5cbb3c862be0e9d7d2963 100644
--- a/tools/usb/Makefile
+++ b/tools/usb/Makefile
@@ -1,14 +1,51 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for USB tools
+include ../scripts/Makefile.include
 
-PTHREAD_LIBS = -lpthread
-WARNINGS = -Wall -Wextra
-CFLAGS = $(WARNINGS) -g -I../include
-LDFLAGS = $(PTHREAD_LIBS)
+bindir ?= /usr/bin
 
-all: testusb ffs-test
-%: %.c
-	$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+# Do not use make's built-in rules
+# (this improves performance and avoids hard-to-debug behaviour).
+MAKEFLAGS += -r
+
+override CFLAGS += -O2 -Wall -Wextra -g -D_GNU_SOURCE -I$(OUTPUT)include -I$(srctree)/tools/include
+override LDFLAGS += -lpthread
+
+ALL_TARGETS := testusb ffs-test
+ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
+
+all: $(ALL_PROGRAMS)
+
+export srctree OUTPUT CC LD CFLAGS
+include $(srctree)/tools/build/Makefile.include
+
+TESTUSB_IN := $(OUTPUT)testusb-in.o
+$(TESTUSB_IN): FORCE
+	$(Q)$(MAKE) $(build)=testusb
+$(OUTPUT)testusb: $(TESTUSB_IN)
+	$(QUIET_LINK)$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
+
+FFS_TEST_IN := $(OUTPUT)ffs-test-in.o
+$(FFS_TEST_IN): FORCE
+	$(Q)$(MAKE) $(build)=ffs-test
+$(OUTPUT)ffs-test: $(FFS_TEST_IN)
+	$(QUIET_LINK)$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
 
 clean:
-	$(RM) testusb ffs-test
+	rm -f $(ALL_PROGRAMS)
+	find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.o.cmd' -delete
+
+install: $(ALL_PROGRAMS)
+	install -d -m 755 $(DESTDIR)$(bindir);		\
+	for program in $(ALL_PROGRAMS); do		\
+		install $$program $(DESTDIR)$(bindir);	\
+	done
+
+FORCE:
+
+.PHONY: all install clean FORCE prepare
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 737666db02de5bcc0028c55f52931d5d06c03d38..cf88233b819a0803e7f47dd85552a9275b135a68 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -482,7 +482,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	 * count is also read inside the mmu_lock critical section.
 	 */
 	kvm->mmu_notifier_count++;
-	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
+	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
+					     range->flags);
 	need_tlb_flush |= kvm->tlbs_dirty;
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)
@@ -4331,7 +4332,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 			       struct kvm_io_device *dev)
 {
-	int i;
+	int i, j;
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm_get_bus(kvm, bus_idx);
@@ -4348,17 +4349,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 
 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
 			  GFP_KERNEL_ACCOUNT);
-	if (!new_bus) {
+	if (new_bus) {
+		memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+		new_bus->dev_count--;
+		memcpy(new_bus->range + i, bus->range + i + 1,
+		       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+	} else {
 		pr_err("kvm: failed to shrink bus, removing it completely\n");
-		goto broken;
+		for (j = 0; j < bus->dev_count; j++) {
+			if (j == i)
+				continue;
+			kvm_iodevice_destructor(bus->range[j].dev);
+		}
 	}
 
-	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
-	new_bus->dev_count--;
-	memcpy(new_bus->range + i, bus->range + i + 1,
-	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
-
-broken:
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
 	synchronize_srcu_expedited(&kvm->srcu);
 	kfree(bus);
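The kvm_io_bus_unregister_dev() rework above replaces the old goto-based error path: on success it shrinks the flexible array by copying the elements before and after the removed slot, and when the smaller allocation fails it now destroys every remaining device instead of leaking them behind a NULL bus pointer. A plain user-space sketch of the same shrink-by-two-memcpys idiom, with hypothetical names rather than kernel API:

/* Illustrative analogue of the bus-shrinking logic; "struct device"
 * and remove_slot() are invented for this example. */
#include <stdlib.h>
#include <string.h>

struct device { int id; };

/* Return a new array of length n - 1 with slot i removed, or NULL on
 * allocation failure, mirroring the kmalloc() failure path above. */
static struct device *remove_slot(const struct device *old, size_t n, size_t i)
{
	struct device *new = malloc((n - 1) * sizeof(*new));

	if (!new)
		return NULL;

	/* copy the elements before the removed slot ... */
	memcpy(new, old, i * sizeof(*new));
	/* ... then the elements after it, closing the gap */
	memcpy(new + i, old + i + 1, (n - 1 - i) * sizeof(*new));
	return new;
}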